diff --git a/.github/scripts/branch-tags.sh b/.github/scripts/branch-tags.sh new file mode 100644 index 00000000..162478a0 --- /dev/null +++ b/.github/scripts/branch-tags.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Exit immediately if a command exits with a non-zero status +set -e + +# Function to get the previous tag +getPreviousTag() { + local tagPrefix="$1" + # List all tags and filter ones that start with tagPrefix, sort by creation date + git tag --sort=-creatordate | grep "^${tagPrefix}" | head -n 1 +} + +# Determine if we're in a GitHub Actions environment +if [ -n "$GITHUB_REF" ] && [ -n "$GITHUB_SHA" ]; then + # Use GHA environment variables + ref="$GITHUB_REF" + commitSha="${GITHUB_SHA:0:7}" +else + # Fallback to local Git repo + if [ ! -d ".git" ]; then + echo "This script must be run from the root of a Git repository or GitHub Actions." + exit 1 + fi + ref=$(git symbolic-ref HEAD) + commitSha=$(git rev-parse --short HEAD) +fi + +branchTag="" +branchStaticTag="" +prevTag="" + +if [ "$ref" == "refs/heads/main" ]; then + branchTag="head" + branchStaticTag="main-${commitSha}" + prevTag=$(getPreviousTag "main-") +elif [[ "$ref" == refs/heads/release/* ]]; then + version="${ref#refs/heads/release/}" # Extract "vX.0" + branchTag="${version}-head" + branchStaticTag="${version}-head-${commitSha}" + prevTag=$(getPreviousTag "${version}-head-") +else + gitTag=$(git tag -l --contains HEAD | head -n 1) + if [[ -n "$gitTag" ]]; then + branchTag="${gitTag}" + branchStaticTag="${gitTag}-${commitSha}" + else + branchTag="dev-${commitSha}" + branchStaticTag="dev-${commitSha}" + fi +fi + +# Output the results +echo "branch_tag=${branchTag}" +echo "branch_static_tag=${branchStaticTag}" +echo "prev_static_tag=${prevTag}" \ No newline at end of file diff --git a/.github/workflows/e2e-ci.yaml b/.github/workflows/e2e-ci.yaml index 0de3bf88..32b165c9 100644 --- a/.github/workflows/e2e-ci.yaml +++ b/.github/workflows/e2e-ci.yaml @@ -32,23 +32,41 @@ env: YQ_VERSION: v4.25.1 E2E_CI: true REPO: rancher - TAG: dev APISERVER_PORT: 8001 DEFAULT_SLEEP_TIMEOUT_SECONDS: 10 KUBECTL_WAIT_TIMEOUT: 300s DEBUG: ${{ github.event.inputs.debug || false }} + CLUSTER_NAME: 'e2e-ci-prometheus-federator' permissions: contents: write jobs: + prebuild-env: + name: Prebuild needed Env vars + runs-on: ubuntu-latest + steps: + - name: Check out the repository to the runner + uses: actions/checkout@v4 + - name: Set Branch Tag and Other Variables + id: set-vars + run: bash ./.github/scripts/branch-tags.sh >> $GITHUB_OUTPUT + outputs: + branch_tag: ${{ steps.set-vars.outputs.branch_tag }} + branch_static_tag: ${{ steps.set-vars.outputs.branch_static_tag }} + prev_tag: ${{ steps.set-vars.outputs.prev_tag }} e2e-prometheus-federator: + needs: [ + prebuild-env, + ] runs-on: ubuntu-latest + env: + TAG: ${{ needs.prebuild-env.outputs.branch_static_tag }} strategy: matrix: k3s_version: # k3d version list k3s | sed 's/+/-/' | sort -h - - ${{ github.event.inputs.k3s_version || 'v1.28.4-k3s2' }} + - ${{ github.event.inputs.k3s_version || 'v1.28.14-k3s1' }} steps: - uses: actions/checkout@v3 @@ -66,28 +84,24 @@ jobs: run: | sudo wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq; - - name: Perform CI + name: Perform pre-e2e image build run: | - REPO=${REPO} TAG=${TAG} ./scripts/build; - REPO=${REPO} TAG=${TAG} ./scripts/package; + EMBEDED_CHART_VERSION=0.3.4 REPO=${REPO} TAG=${TAG} make build; + REPO=${REPO} TAG=${TAG} make package; - - name: Provision k3d Cluster - uses: 
AbsaOSS/k3d-action@v2 - # k3d will automatically create a network named k3d-test-cluster-1 with the range 172.18.0.0/16 - with: - cluster-name: "e2e-ci-prometheus-federator" - args: >- - --agents 1 - --network "nw01" - --image docker.io/rancher/k3s:${{matrix.k3s_version}} + name : Install k3d + run : ./.github/workflows/e2e/scripts/install-k3d.sh + - + name : Setup k3d cluster + run : K3S_VERSION=${{ matrix.k3s_version }} ./.github/workflows/e2e/scripts/setup-cluster.sh - name: Import Images Into k3d run: | - k3d image import ${REPO}/prometheus-federator:${TAG} -c e2e-ci-prometheus-federator; + k3d image import ${REPO}/prometheus-federator:${TAG} -c $CLUSTER_NAME; - name: Setup kubectl context run: | - kubectl config use-context k3d-e2e-ci-prometheus-federator; + kubectl config use-context "k3d-$CLUSTER_NAME"; - name: Set Up Tmate Debug Session if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.enable_tmate == 'true' }} @@ -110,39 +124,39 @@ jobs: - name: Check if Project Registration Namespace is auto-created on namespace detection run: ./.github/workflows/e2e/scripts/create-project-namespace.sh; - - # Commenting out for failure in CI but not locally - # - - # name: Create Project Monitoring Stack via ProjectHelmChart CR - # run: ./.github/workflows/e2e/scripts/create-projecthelmchart.sh; - # - - # name: Check if the Project Prometheus Stack is up - # run: ./.github/workflows/e2e/scripts/validate-project-monitoring.sh; - # - - # name: Wait for 8 minutes for enough scraping to be done to continue - # run: | - # for i in {1..48}; do sleep 10; echo "Waited $((i*10)) seconds for metrics to be populated"...; done; - # - - # name: Validate Project Prometheus Targets - # run: ./.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh; - # - - # name: Validate Project Grafana Datasources - # run: ./.github/workflows/e2e/scripts/validate-project-grafana-datasource.sh; - # - - # name: Validate Project Grafana Dashboards - # run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboards.sh; - # #- - # #name: Validate Project Grafana Dashboard Data - # #run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboard-data.sh; - # - - # name: Validate Project Prometheus Alerts - # run: ./.github/workflows/e2e/scripts/validate-project-prometheus-alerts.sh; - # - - # name: Validate Project Alertmanager - # run: ./.github/workflows/e2e/scripts/validate-project-alertmanager.sh; - # - - # name: Delete Project Prometheus Stack - # run: ./.github/workflows/e2e/scripts/delete-projecthelmchart.sh; + - + name: Create Project Monitoring Stack via ProjectHelmChart CR + run: DEFAULT_SLEEP_TIMEOUT_SECONDS=20 ./.github/workflows/e2e/scripts/create-projecthelmchart.sh; + - + name: Check if the Project Prometheus Stack is up + run: ./.github/workflows/e2e/scripts/validate-project-monitoring.sh; + - + name: Wait for 8 minutes for enough scraping to be done to continue + run: | + for i in {1..48}; do sleep 10; echo "Waited $((i*10)) seconds for metrics to be populated"...; done; + - + name: Validate Project Prometheus Targets + run: ./.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh; + - + name: Validate Project Grafana Datasources + run: ./.github/workflows/e2e/scripts/validate-project-grafana-datasource.sh; + - + name: Validate Project Grafana Dashboards + run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboards.sh; + # Re-disable this as it's been broken since Jun 28, 2023 + # More context: 
https://github.com/rancher/prometheus-federator/pull/73 + # - + # name: Validate Project Grafana Dashboard Data + # run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboard-data.sh; + - + name: Validate Project Prometheus Alerts + run: ./.github/workflows/e2e/scripts/validate-project-prometheus-alerts.sh; + - + name: Validate Project Alertmanager + run: ./.github/workflows/e2e/scripts/validate-project-alertmanager.sh; + - + name: Delete Project Prometheus Stack + run: ./.github/workflows/e2e/scripts/delete-projecthelmchart.sh; - name: Uninstall Prometheus Federator run: ./.github/workflows/e2e/scripts/uninstall-federator.sh; diff --git a/.github/workflows/e2e/scripts/create-project-namespace.sh b/.github/workflows/e2e/scripts/create-project-namespace.sh index f012e7b1..212441cb 100755 --- a/.github/workflows/e2e/scripts/create-project-namespace.sh +++ b/.github/workflows/e2e/scripts/create-project-namespace.sh @@ -6,8 +6,13 @@ source $(dirname $0)/entry cd $(dirname $0)/../../../.. -kubectl create namespace e2e-prometheus-federator || true -kubectl label namespace e2e-prometheus-federator field.cattle.io/projectId=p-example --overwrite +USE_RANCHER=${USE_RANCHER:-"false"} +if [ "$USE_RANCHER" = "true" ]; then + kubectl apply -f ./examples/ci/project.yaml +fi + +kubectl apply -f ./examples/ci/namespace.yaml + sleep "${DEFAULT_SLEEP_TIMEOUT_SECONDS}" if ! kubectl get namespace cattle-project-p-example; then echo "ERROR: Expected cattle-project-p-example namespace to exist after ${DEFAULT_SLEEP_TIMEOUT_SECONDS} seconds, not found" diff --git a/.github/workflows/e2e/scripts/create-projecthelmchart.sh b/.github/workflows/e2e/scripts/create-projecthelmchart.sh index 04775c90..5ce0611b 100755 --- a/.github/workflows/e2e/scripts/create-projecthelmchart.sh +++ b/.github/workflows/e2e/scripts/create-projecthelmchart.sh @@ -7,14 +7,14 @@ source $(dirname $0)/entry cd $(dirname $0)/../../../.. if [[ "${E2E_CI}" == "true" ]]; then - kubectl apply -f ./examples/ci-example.yaml + kubectl apply -f ./examples/ci/project-helm-chart.yaml else - kubectl apply -f ./examples/example.yaml + kubectl apply -f ./examples/project-helm-chart.yaml fi sleep ${DEFAULT_SLEEP_TIMEOUT_SECONDS}; if ! kubectl get -n cattle-monitoring-system job/helm-install-cattle-project-p-example-monitoring; then - echo "ERROR: Helm Install Job for Project Monitoring Stack was never created after ${KUBECTL_WAIT_TIMEOUT} seconds" + echo "ERROR: Helm Install Job for Project Monitoring Stack was never created after ${DEFAULT_SLEEP_TIMEOUT_SECONDS} seconds" exit 1 fi diff --git a/.github/workflows/e2e/scripts/delete-projecthelmchart.sh b/.github/workflows/e2e/scripts/delete-projecthelmchart.sh index 20754a6c..f4862a42 100755 --- a/.github/workflows/e2e/scripts/delete-projecthelmchart.sh +++ b/.github/workflows/e2e/scripts/delete-projecthelmchart.sh @@ -6,7 +6,11 @@ source $(dirname $0)/entry cd $(dirname $0)/../../../.. -kubectl delete -f ./examples/ci-example.yaml +if [[ "${E2E_CI}" == "true" ]]; then + kubectl delete -f ./examples/ci/project-helm-chart.yaml +else + kubectl delete -f ./examples/project-helm-chart.yaml +fi if kubectl get -n cattle-monitoring-system job/helm-delete-cattle-project-p-example-monitoring --ignore-not-found; then if ! 
kubectl wait --for=condition=complete --timeout="${KUBECTL_WAIT_TIMEOUT}" -n cattle-monitoring-system job/helm-delete-cattle-project-p-example-monitoring; then echo "ERROR: Helm Uninstall Job for Project Monitoring Stack never completed after ${KUBECTL_WAIT_TIMEOUT}" diff --git a/.github/workflows/e2e/scripts/generate-artifacts.sh b/.github/workflows/e2e/scripts/generate-artifacts.sh index 8a9f2363..cc44dabe 100755 --- a/.github/workflows/e2e/scripts/generate-artifacts.sh +++ b/.github/workflows/e2e/scripts/generate-artifacts.sh @@ -38,17 +38,41 @@ MANIFEST_DIRECTORY=${ARTIFACT_DIRECTORY}/manifests LOG_DIRECTORY=${ARTIFACT_DIRECTORY}/logs # Manifests - mkdir -p ${MANIFEST_DIRECTORY} -kubectl get pods -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/monitoring_pods.yaml || true -kubectl get pods -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/project_pods.yaml || true +mkdir -p ${MANIFEST_DIRECTORY}/helmcharts +mkdir -p ${MANIFEST_DIRECTORY}/helmreleases +mkdir -p ${MANIFEST_DIRECTORY}/daemonsets +mkdir -p ${MANIFEST_DIRECTORY}/deployments +mkdir -p ${MANIFEST_DIRECTORY}/jobs +mkdir -p ${MANIFEST_DIRECTORY}/statefulsets +mkdir -p ${MANIFEST_DIRECTORY}/pods +mkdir -p ${MANIFEST_DIRECTORY}/projecthelmcharts + kubectl get namespaces -o yaml > ${MANIFEST_DIRECTORY}/namespaces.yaml || true -kubectl get projecthelmchart -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/projecthelmcharts.yaml || true -kubectl get helmcharts -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmcharts.yaml || true -kubectl get helmreleases -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmreleases.yaml || true +kubectl get helmcharts -A > ${MANIFEST_DIRECTORY}/helmcharts-list.txt || true +kubectl get services -A > ${MANIFEST_DIRECTORY}/services-list.txt || true -# Logs +## cattle-monitoring-system ns manifests +kubectl get helmcharts -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmcharts/cattle-monitoring-system.yaml || true +kubectl get helmreleases -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmreleases/cattle-monitoring-system.yaml || true +kubectl get daemonset -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/daemonsets/cattle-monitoring-system.yaml || true +kubectl get deployment -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/deployments/cattle-monitoring-system.yaml || true +kubectl get job -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/jobs/cattle-monitoring-system.yaml || true +kubectl get statefulset -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/statefulsets/cattle-monitoring-system.yaml || true +kubectl get pods -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/pods/cattle-monitoring-system.yaml || true +## cattle-project-p-example ns manifests +kubectl get deployment -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/deployments/cattle-project-p-example.yaml || true +kubectl get projecthelmchart -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/projecthelmcharts/cattle-project-p-example.yaml || true +kubectl get statefulset -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/statefulsets/cattle-project-p-example.yaml || true +kubectl get pods -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/pods/cattle-project-p-example.yaml || true + +## cattle-project-p-example-monitoring ns manifests +kubectl get deployment -n cattle-project-p-example-monitoring -o yaml > 
${MANIFEST_DIRECTORY}/deployments/cattle-project-p-example-monitoring.yaml || true +kubectl get statefulset -n cattle-project-p-example-monitoring -o yaml > ${MANIFEST_DIRECTORY}/statefulsets/cattle-project-p-example-monitoring.yaml || true +kubectl get pods -n cattle-project-p-example-monitoring -o yaml > ${MANIFEST_DIRECTORY}/pods/cattle-project-p-example-monitoring.yaml || true + +# Logs mkdir -p ${LOG_DIRECTORY}/rancher-monitoring ## Rancher Monitoring diff --git a/.github/workflows/e2e/scripts/install-federator.sh b/.github/workflows/e2e/scripts/install-federator.sh index 1c1aa44f..1a22fb9a 100755 --- a/.github/workflows/e2e/scripts/install-federator.sh +++ b/.github/workflows/e2e/scripts/install-federator.sh @@ -25,7 +25,7 @@ case "${KUBERNETES_DISTRIBUTION_TYPE}" in cluster_args="--set helmProjectOperator.helmController.enabled=false" fi ;; - *) + v1.25.*) embedded_helm_controller_fixed_version="v1.25.4" if [[ $(echo ${kubernetes_version} ${embedded_helm_controller_fixed_version} | tr " " "\n" | sort -rV | head -n 1 ) == "${embedded_helm_controller_fixed_version}" ]]; then cluster_args="--set helmProjectOperator.helmController.enabled=false" @@ -52,7 +52,7 @@ case "${KUBERNETES_DISTRIBUTION_TYPE}" in cluster_args="--set helmProjectOperator.helmController.enabled=false" fi ;; - *) + v1.25.*) embedded_helm_controller_fixed_version="v1.25.4" if [[ $(echo ${kubernetes_version} ${embedded_helm_controller_fixed_version} | tr " " "\n" | sort -rV | head -n 1 ) == "${embedded_helm_controller_fixed_version}" ]]; then cluster_args="--set helmProjectOperator.helmController.enabled=false" diff --git a/.github/workflows/e2e/scripts/install-k3d.sh b/.github/workflows/e2e/scripts/install-k3d.sh new file mode 100755 index 00000000..1aa640e6 --- /dev/null +++ b/.github/workflows/e2e/scripts/install-k3d.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -e +set -x + +K3D_URL=https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh +DEFAULT_K3D_VERSION=v5.7.4 + +install_k3d(){ + local k3dVersion=${K3D_VERSION:-${DEFAULT_K3D_VERSION}} + echo -e "Downloading k3d@${k3dVersion} see: ${K3D_URL}" + curl --silent --fail ${K3D_URL} | TAG=${k3dVersion} bash +} + +install_k3d + +k3d version \ No newline at end of file diff --git a/.github/workflows/e2e/scripts/install-monitoring.sh b/.github/workflows/e2e/scripts/install-monitoring.sh index d94f503b..6c4a7bee 100755 --- a/.github/workflows/e2e/scripts/install-monitoring.sh +++ b/.github/workflows/e2e/scripts/install-monitoring.sh @@ -13,6 +13,9 @@ helm version helm repo add ${HELM_REPO} https://charts.rancher.io helm repo update +echo "Create required \`cattle-fleet-system\` namespace" +kubectl create namespace cattle-fleet-system + echo "Installing rancher monitoring crd with :\n" helm search repo ${HELM_REPO}/rancher-monitoring-crd --versions --max-col-width=0 | head -n 2 diff --git a/.github/workflows/e2e/scripts/setup-cluster.sh b/.github/workflows/e2e/scripts/setup-cluster.sh new file mode 100755 index 00000000..33049db7 --- /dev/null +++ b/.github/workflows/e2e/scripts/setup-cluster.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +set -e + +source ./scripts/version + +if [ -z "$CLUSTER_NAME" ]; then + echo "CLUSTER_NAME must be specified when setting up a cluster" + exit 1 +fi + +if [ -z "$K3S_VERSION" ]; then + echo "K3S_VERSION must be specified when setting up a cluster, use $(k3d version list k3s) to find valid versions" + exit 1 +fi + +# waits until all nodes are ready +wait_for_nodes(){ + timeout=120 + start_time=$(date +%s) + echo "wait until all agents are ready" + 
while : + do + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + if [ $elapsed_time -ge $timeout ]; then + echo "Timeout reached, exiting..." + exit 1 + fi + + readyNodes=1 + statusList=$(kubectl get nodes --no-headers | awk '{ print $2}') + # shellcheck disable=SC2162 + while read status + do + current_time=$(date +%s) + elapsed_time=$((current_time - start_time)) + if [ $elapsed_time -ge $timeout ]; then + echo "Timeout reached, exiting..." + exit 1 + fi + if [ "$status" == "NotReady" ] || [ "$status" == "" ] + then + readyNodes=0 + break + fi + done <<< "$(echo -e "$statusList")" + # all nodes are ready; exit + if [[ $readyNodes == 1 ]] + then + break + fi + sleep 1 + done +} + +k3d cluster delete "$CLUSTER_NAME" || true +k3d cluster create "$CLUSTER_NAME" --image "docker.io/rancher/k3s:${K3S_VERSION}" + +wait_for_nodes + +echo "$CLUSTER_NAME ready" + +kubectl cluster-info --context "k3d-${CLUSTER_NAME}" +kubectl config use-context "k3d-${CLUSTER_NAME}" +kubectl get nodes -o wide \ No newline at end of file diff --git a/.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh b/.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh index 12223d29..3e81a119 100755 --- a/.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh +++ b/.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh @@ -25,7 +25,8 @@ yq '.data.activeTargets[] | {.labels.job: .health}' ${tmp_targets_yaml} > ${tmp_ echo "TARGETS:"; if [[ $(yq '. | length' ${tmp_targets_up_yaml}) != "4" ]]; then - echo "ERROR: Expected exacty 4 targets to be up in Project Prometheus: federate, cattle-project-p-example-m-alertmanager, cattle-project-p-example-m-prometheus, cattle-project-p-example-monitoring-grafana" + echo "ERROR: Expected exactly 4 targets but found $(yq '. | length' ${tmp_targets_up_yaml})." 
+ echo "Expected Targets in Project Prometheus: federate, cattle-project-p-example-m-alertmanager, cattle-project-p-example-m-prometheus, cattle-project-p-example-monitoring-grafana" echo "TARGETS:" cat ${tmp_targets_up_yaml} exit 1 diff --git a/.gitignore b/.gitignore index 650bf217..bfae014b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ /.cache /bin +/build /dist *.swp .idea diff --git a/Makefile b/Makefile index f944f016..e7720925 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,6 @@ $(TARGETS): .DEFAULT_GOAL := default # Charts Build Scripts - pull-scripts: ./scripts/pull-scripts diff --git a/assets/prometheus-federator/prometheus-federator-0.4.3-rc.1.tgz b/assets/prometheus-federator/prometheus-federator-0.4.3-rc.1.tgz new file mode 100644 index 00000000..0f2e6e0c Binary files /dev/null and b/assets/prometheus-federator/prometheus-federator-0.4.3-rc.1.tgz differ diff --git a/charts/prometheus-federator/0.4.3-rc.1/Chart.yaml b/charts/prometheus-federator/0.4.3-rc.1/Chart.yaml new file mode 100644 index 00000000..eea5005c --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/Chart.yaml @@ -0,0 +1,18 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Prometheus Federator + catalog.cattle.io/namespace: cattle-monitoring-system + catalog.cattle.io/os: linux,windows + catalog.cattle.io/permits-os: linux,windows + catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1 + catalog.cattle.io/release-name: prometheus-federator +apiVersion: v2 +appVersion: 0.4.3-rc.1 +dependencies: +- name: helmProjectOperator + repository: file://./charts/helmProjectOperator + version: 0.3.1 +description: Prometheus Federator +icon: https://raw.githubusercontent.com/rancher/prometheus-federator/main/assets/logos/prometheus-federator.svg +name: prometheus-federator +version: 0.4.3-rc.1 diff --git a/charts/prometheus-federator/0.4.3-rc.1/README.md b/charts/prometheus-federator/0.4.3-rc.1/README.md new file mode 100644 index 00000000..7da4edfc --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/README.md @@ -0,0 +1,120 @@ +# Prometheus Federator + +This chart is deploys a Helm Project Operator (based on the [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator)), an operator that manages deploying Helm charts each containing a Project Monitoring Stack, where each stack contains: +- [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) +- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) +- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) (deployed via an embedded Helm chart) +- Default PrometheusRules and Grafana dashboards based on the collection of community-curated resources from [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) +- Default ServiceMonitors that watch the deployed resources + +> **Important Note: Prometheus Federator is designed to be deployed alongside an existing Prometheus Operator deployment in a cluster that has already installed the Prometheus Operator CRDs.** + +By default, the chart is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured 
to federate namespace-scoped metrics from by default. + +## Pre-Installation: Using Prometheus Federator with Rancher and rancher-monitoring + +If you are running your cluster on [Rancher](https://rancher.com/) and already have [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/) deployed onto your cluster, Prometheus Federator's default configuration should already be configured to work with your existing Cluster Monitoring Stack; however, here are some notes on how we recommend you configure rancher-monitoring to optimize the security and usability of Prometheus Federator in your cluster: + +### Ensure the cattle-monitoring-system namespace is placed into the System Project (or a similarly locked down Project that has access to other Projects in the cluster) + +Prometheus Operator's security model expects that the namespace it is deployed into (`cattle-monitoring-system`) has limited access for anyone except Cluster Admins to avoid privilege escalation via execing into Pods (such as the Jobs executing Helm operations). In addition, deploying Prometheus Federator and all Project Prometheus stacks into the System Project ensures that the each Project Prometheus is able to reach out to scrape workloads across all Projects (even if Network Policies are defined via Project Network Isolation) but has limited access for Project Owners, Project Members, and other users to be able to access data they shouldn't have access to (i.e. being allowed to exec into pods, set up the ability to scrape namespaces outside of a given Project, etc.). + +### Configure rancher-monitoring to only watch for resources created by the Helm chart itself + +Since each Project Monitoring Stack will watch the other namespaces and collect additional custom workload metrics or dashboards already, it's recommended to configure the following settings on all selectors to ensure that the Cluster Prometheus Stack only monitors resources created by the Helm Chart itself: + +``` +matchLabels: + release: "rancher-monitoring" +``` + +The following selector fields are recommended to have this value: +- `.Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector` +- `.Values.prometheus.prometheusSpec.serviceMonitorSelector` +- `.Values.prometheus.prometheusSpec.podMonitorSelector` +- `.Values.prometheus.prometheusSpec.ruleSelector` +- `.Values.prometheus.prometheusSpec.probeSelector` + +Once this setting is turned on, you can always create ServiceMonitors or PodMonitors that are picked up by the Cluster Prometheus by adding the label `release: "rancher-monitoring"` to them (in which case they will be ignored by Project Monitoring Stacks automatically by default, even if the namespace in which those ServiceMonitors or PodMonitors reside in are not system namespaces). 
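To make the labeling convention above concrete, here is a rough sketch (editor illustration, not part of the chart docs) of how those selectors could be set and how a single ServiceMonitor could then be opted back into the Cluster Prometheus; the repo alias `rancher-charts`, the release name `rancher-monitoring`, and the `my-app` names are assumptions for illustration only:

```bash
# Hedged sketch: restrict the Cluster Prometheus selectors to chart-managed resources,
# then explicitly opt one ServiceMonitor back in via the release label.
# Repo alias, release name, and the my-app names are placeholders.
helm upgrade rancher-monitoring rancher-charts/rancher-monitoring \
  -n cattle-monitoring-system --reuse-values \
  --set prometheus.prometheusSpec.serviceMonitorSelector.matchLabels.release=rancher-monitoring \
  --set prometheus.prometheusSpec.podMonitorSelector.matchLabels.release=rancher-monitoring

# A ServiceMonitor carrying this label is scraped by the Cluster Prometheus and, by
# default, ignored by Project Monitoring Stacks.
kubectl label servicemonitor my-app-servicemonitor -n my-app release=rancher-monitoring
```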
+ +> Note: If you don't want to allow users to be able to create ServiceMonitors and PodMonitors that aggregate into the Cluster Prometheus in Project namespaces, you can additionally set the namespaceSelectors on the chart to only target system namespaces (which must contain `cattle-monitoring-system` and `cattle-dashboards`, where resources are deployed into by default by rancher-monitoring; you will also need to monitor the `default` namespace to get apiserver metrics or create a custom ServiceMonitor to scrape apiserver metrics from the Service residing in the default namespace) to limit your Cluster Prometheus from picking up other Prometheus Operator CRs; in that case, it would be recommended to turn `.Values.prometheus.prometheusSpec.ignoreNamespaceSelectors=true` to allow you to define ServiceMonitors that can monitor non-system namespaces from within a system namespace. + +In addition, if you modified the default `.Values.grafana.sidecar.*.searchNamespace` values on the Grafana Helm subchart for Monitoring V2, it is also recommended to remove the overrides or ensure that your defaults are scoped to only system namespaces for the following values: +- `.Values.grafana.sidecar.dashboards.searchNamespace` (default `cattle-dashboards`) +- `.Values.grafana.sidecar.datasources.searchNamespace` (default `null`, which means it uses the release namespace `cattle-monitoring-system`) +- `.Values.grafana.sidecar.plugins.searchNamespace` (default `null`, which means it uses the release namespace `cattle-monitoring-system`) +- `.Values.grafana.sidecar.notifiers.searchNamespace` (default `null`, which means it uses the release namespace `cattle-monitoring-system`) + +### Increase the CPU / memory limits of the Cluster Prometheus + +Depending on a cluster's setup, it's generally recommended to give a large amount of dedicated memory to the Cluster Prometheus to avoid restarts due to out-of-memory errors (OOMKilled), usually caused by churn created in the cluster that causes a large number of high cardinality metrics to be generated and ingested by Prometheus within one block of time; this is one of the reasons why the default Rancher Monitoring stack expects around 4GB of RAM to be able to operate in a normal-sized cluster. However, when introducing Project Monitoring Stacks that are all sending `/federate` requests to the same Cluster Prometheus and are reliant on the Cluster Prometheus being "up" to federate that system data on their namespaces, it's even more important that the Cluster Prometheus has an ample amount of CPU / memory assigned to it to prevent an outage that can cause data gaps across all Project Prometheis in the cluster. + +> Note: There are no specific recommendations on how much memory the Cluster Prometheus should be configured with since it depends entirely on the user's setup (namely the likelihood of encountering a high churn rate and the scale of metrics that could be generated at that time); it generally varies per setup. + +## How does the operator work? + +1. On deploying this chart, users can create ProjectHelmCharts CRs with `spec.helmApiVersion` set to `monitoring.cattle.io/v1alpha1` (also known as "Project Monitors" in the Rancher UI) in a **Project Registration Namespace (`cattle-project-`)**. +2. 
On seeing each ProjectHelmChartCR, the operator will automatically deploy a Project Prometheus stack on the Project Owner's behalf in the **Project Release Namespace (`cattle-project--monitoring`)** based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**. +3. RBAC will automatically be assigned in the Project Release Namespace to allow users to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack deployed; this will be based on RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) (see below for more information about configuring RBAC). + +### What is a Project? + +In Prometheus Federator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`; by default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given [Rancher](https://rancher.com/) Project. + +### Configuring the Helm release created by a ProjectHelmChart + +The `spec.values` of this ProjectHelmChart resources will correspond to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf; to see the underlying chart's `values.yaml` spec, either: +- View to the chart's definition located at [`rancher/prometheus-federator` under `charts/rancher-project-monitoring`](https://github.com/rancher/prometheus-federator/blob/main/charts/rancher-project-monitoring) (where the chart version will be tied to the version of this operator) +- Look for the ConfigMap named `monitoring.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that was used to configure the chart (which was embedded directly into the `prometheus-federator` binary). + +### Namespaces + +As a Project Operator based on [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator), Prometheus Federator has three different classifications of namespaces that the operator looks out for: +1. **Operator / System Namespace**: this is the namespace that the operator is deployed into (e.g. `cattle-monitoring-system`). This namespace will contain all HelmCharts and HelmReleases for all ProjectHelmCharts watched by this operator. **Only Cluster Admins should have access to this namespace.** +2. **Project Registration Namespace (`cattle-project-`)**: this is the set of namespaces that the operator watches for ProjectHelmCharts within. The RoleBindings and ClusterRoleBindings that apply to this namespace will also be the source of truth for the auto-assigned RBAC created in the Project Release Namespace (see more details below). **Project Owners (admin), Project Members (edit), and Read-Only Members (view) should have access to this namespace**. +> Note: Project Registration Namespaces will be auto-generated by the operator and imported into the Project it is tied to if `.Values.global.cattle.projectLabel` is provided (which is set to `field.cattle.io/projectId` by default); this indicates that a Project Registration Namespace should be created by the operator if at least one namespace is observed with that label. The operator will not let these namespaces be deleted unless either all namespaces with that label are gone (e.g. 
this is the last namespace in that project, in which case the namespace will be marked with the label `"helm.cattle.io/helm-project-operator-orphaned": "true"`, which signals that it can be deleted) or it is no longer watching that project (because the project ID was provided under `.Values.helmProjectOperator.otherSystemProjectLabelValues`, which serves as a denylist for Projects). These namespaces will also never be auto-deleted to avoid destroying user data; it is recommended that users clean up these namespaces manually if desired on creating or deleting a project +> Note: if `.Values.global.cattle.projectLabel` is not provided, the Operator / System Namespace will also be the Project Registration Namespace +3. **Project Release Namespace (`cattle-project--monitoring`)**: this is the set of namespaces that the operator deploys Project Monitoring Stacks within on behalf of a ProjectHelmChart; the operator will also automatically assign RBAC to Roles created in this namespace by the Project Monitoring Stack based on bindings found in the Project Registration Namespace. **Only Cluster Admins should have access to this namespace; Project Owners (admin), Project Members (edit), and Read-Only Members (view) will be assigned limited access to this namespace by the deployed Helm Chart and Prometheus Federator.** +> Note: Project Release Namespaces are automatically deployed and imported into the project whose ID is specified under `.Values.helmProjectOperator.projectReleaseNamespaces.labelValue` (which defaults to the value of `.Values.global.cattle.systemProjectId` if not specified) whenever a ProjectHelmChart is specified in a Project Registration Namespace +> Note: Project Release Namespaces follow the same orphaning conventions as Project Registration Namespaces (see note above) +> Note: if `.Values.projectReleaseNamespaces.enabled` is false, the Project Release Namespace will be the same as the Project Registration Namespace + +### Helm Resources (HelmChart, HelmRelease) + +On deploying a ProjectHelmChart, the Prometheus Federator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn: +- A HelmChart CR (managed via an embedded [k3s-io/helm-contoller](https://github.com/k3s-io/helm-controller) in the operator): this custom resource automatically creates a Job in the same namespace that triggers a `helm install`, `helm upgrade`, or `helm uninstall` depending on the change applied to the HelmChart CR; this CR is automatically updated on changes to the ProjectHelmChart (e.g. modifying the values.yaml) or changes to the underlying Project definition (e.g. adding or removing namespaces from a project). +> **Important Note: If a ProjectHelmChart is not deploying or updating the underlying Project Monitoring Stack for some reason, the Job created by this resource in the Operator / System namespace should be the first place you check to see if there's something wrong with the Helm operation; however, this is generally only accessible by a Cluster Admin.** +- A HelmRelease CR (managed via an embedded [rancher/helm-locker](https://github.com/rancher/helm-locker) in the operator): this custom resource automatically locks a deployed Helm release in place and automatically overwrites updates to underlying resources unless the change happens via a Helm operation (`helm install`, `helm upgrade`, or `helm uninstall` performed by the HelmChart CR). 
+ > Note: HelmRelease CRs emit Kubernetes Events that detect when an underlying Helm release is being modified and locks it back to place; to view these events, you can use `kubectl describe helmrelease -n `; you can also view the logs on this operator to see when changes are detected and which resources were attempted to be modified + +Both of these resources are created for all Helm charts in the Operator / System namespaces to avoid escalation of privileges to underprivileged users. + +### RBAC + +As described in the section on namespaces above, Prometheus Federator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings: +- ClusterRoleBindings +- RoleBindings in the Project Release Namespace + +On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the binding points to matches a ClusterRole with the name provided under `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin`, `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit`, or `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view`; by default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). + +> Note: for Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates. + +If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels: +- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}` +- `helm.cattle.io/project-helm-chart-role-aggregate-from: ` + +By default, the `rancher-project-monitoring` chart (the underlying chart deployed by Prometheus Federator) creates three default Roles per Project Release Namespace that provide `admin`, `edit`, and `view` users with permissions to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack to provide least privilege; however, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control assigning those RBAC roles to users in their Project Registration namespaces.
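As a sketch of that second option (editor illustration, not part of the chart docs), a Cluster Admin could create an extra Role in a Project Release Namespace and label it so Prometheus Federator aggregates it to the subjects bound to the `admin` ClusterRole; the namespace, Role name, and release name below mirror the `p-example` project used elsewhere in this PR and are assumptions only:

```bash
# Hedged sketch: create an additional Role in the Project Release Namespace and label it
# so the operator builds RoleBindings for subjects bound to the "admin" ClusterRole.
# Namespace, Role name, and release name are assumptions for illustration.
kubectl create role extra-configmap-reader \
  -n cattle-project-p-example-monitoring \
  --verb=get,list,watch --resource=configmaps

kubectl label role extra-configmap-reader -n cattle-project-p-example-monitoring \
  helm.cattle.io/project-helm-chart-role=cattle-project-p-example-monitoring \
  helm.cattle.io/project-helm-chart-role-aggregate-from=admin
```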
+ +### Advanced Helm Project Operator Configuration + +|Value|Configuration| +|---|---------------------------| +|`helmProjectOperator.valuesOverride`| Allows an Operator to override values that are set on each ProjectHelmChart deployment on an operator-level; user-provided options (specified on the `spec.values` of the ProjectHelmChart) are automatically overridden if operator-level values are provided. For an exmaple, see how the default value overrides `federate.targets` (note: when overriding list values like `federate.targets`, user-provided list values will **not** be concatenated) | +|`helmProjectOperator.projectReleaseNamespaces.labelValues`| The value of the Project that all Project Release Namespaces should be auto-imported into (via label and annotation). Not recommended to be overridden on a Rancher setup. | +|`helmProjectOperator.otherSystemProjectLabelValues`| Other namespaces that the operator should treat as a system namespace that should not be monitored. By default, all namespaces that match `global.cattle.systemProjectId` will not be matched. `cattle-monitoring-system`, `cattle-dashboards`, and `kube-system` are explicitly marked as system namespaces as well, regardless of label or annotation. | +|`helmProjectOperator.releaseRoleBindings.aggregate`| Whether to automatically create RBAC resources in Project Release namespaces +|`helmProjectOperator.releaseRoleBindings.clusterRoleRefs.`| ClusterRoles to reference to discover subjects to create RoleBindings for in the Project Release Namespace for all corresponding Project Release Roles. See RBAC above for more information | +|`helmProjectOperator.hardenedNamespaces.enabled`| Whether to automatically patch the default ServiceAccount with `automountServiceAccountToken: false` and create a default NetworkPolicy in all managed namespaces in the cluster; the default values ensure that the creation of the namespace does not break a CIS 1.16 hardened scan | +|`helmProjectOperator.hardenedNamespaces.configuration`| The configuration to be supplied to the default ServiceAccount or auto-generated NetworkPolicy on managing a namespace | +|`helmProjectOperator.helmController.enabled`| Whether to enable an embedded k3s-io/helm-controller instance within the Helm Project Operator. Should be disabled for RKE2/K3s clusters before v1.23.14 / v1.24.8 / v1.25.4 since RKE2/K3s clusters already run Helm Controller at a cluster-wide level to manage internal Kubernetes components | +|`helmProjectOperator.helmLocker.enabled`| Whether to enable an embedded rancher/helm-locker instance within the Helm Project Operator. 
| diff --git a/charts/prometheus-federator/0.4.3-rc.1/app-README.md b/charts/prometheus-federator/0.4.3-rc.1/app-README.md new file mode 100644 index 00000000..99fa7ca1 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/app-README.md @@ -0,0 +1,27 @@ +# Prometheus Federator + +This chart deploys an operator that manages Project Monitoring Stacks composed of the following set of resources that are scoped to project namespaces: +- [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) +- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) +- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) (deployed via an embedded Helm chart) +- Default PrometheusRules and Grafana dashboards based on the collection of community-curated resources from [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) +- Default ServiceMonitors that watch the deployed Prometheus, Grafana, and Alertmanager + +Since this Project Monitoring Stack deploys Prometheus Operator CRs, an existing Prometheus Operator instance must already be deployed in the cluster for Prometheus Federator to successfully be able to deploy Project Monitoring Stacks. It is recommended to use [`rancher-monitoring`](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/) for this. For more information on how the chart works or advanced configurations, please read the `README.md`. + +## Upgrading to Kubernetes v1.25+ + +Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API. + +As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`. +​ +> **Note:** +> In this chart release, any previous field that was associated with any PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`. + ​ +> **Note:** +> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).** +> +> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets. +Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. +​ +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. 
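The in-place upgrade described above boils down to a single Helm command; the sketch below (editor illustration, not part of the chart docs) assumes the chart was installed as a release named `prometheus-federator` in `cattle-monitoring-system` from a repo aliased `rancher-charts`, so adjust it to however the chart was actually installed:

```bash
# Hedged sketch: disable PSP resources before upgrading the cluster to Kubernetes v1.25+.
# Release name, namespace, and repo alias are assumptions; existing values are kept as-is.
helm upgrade prometheus-federator rancher-charts/prometheus-federator \
  -n cattle-monitoring-system --reuse-values \
  --set global.cattle.psp.enabled=false
```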
\ No newline at end of file diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/Chart.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/Chart.yaml new file mode 100644 index 00000000..4d81896c --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/Chart.yaml @@ -0,0 +1,18 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Helm Project Operator + catalog.cattle.io/kube-version: '>=1.16.0-0' + catalog.cattle.io/namespace: cattle-helm-system + catalog.cattle.io/os: linux,windows + catalog.cattle.io/permits-os: linux,windows + catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1 + catalog.cattle.io/rancher-version: '>= 2.6.0-0' + catalog.cattle.io/release-name: helm-project-operator +apiVersion: v2 +appVersion: v0.3.1 +description: Helm Project Operator +maintainers: +- email: dan.pock@suse.com + name: Dan Pock +name: helmProjectOperator +version: 0.3.1 diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/README.md b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/README.md new file mode 100644 index 00000000..9623ff22 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/README.md @@ -0,0 +1,77 @@ +# Helm Project Operator + +## How does the operator work? + +1. On deploying a Helm Project Operator, users can create ProjectHelmCharts CRs with `spec.helmApiVersion` set to `dummy.cattle.io/v1alpha1` in a **Project Registration Namespace (`cattle-project-`)**. +2. On seeing each ProjectHelmChartCR, the operator will automatically deploy the embedded Helm chart on the Project Owner's behalf in the **Project Release Namespace (`cattle-project--dummy`)** based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**. +3. RBAC will automatically be assigned in the Project Release Namespace to allow users to based on Role created in the Project Release Namespace with a given set of labels; this will be based on RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) (see below for more information about configuring RBAC). + +### What is a Project? + +In Helm Project Operator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`; by default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given [Rancher](https://rancher.com/) Project. + +### What is a ProjectHelmChart? + +A ProjectHelmChart is an instance of a (project-scoped) Helm chart deployed on behalf of a user who has permissions to create ProjectHelmChart resources in a Project Registration namespace. + +Generally, the best way to think about the ProjectHelmChart model is by comparing it to two other models: +1. 
Managed Kubernetes providers (EKS, GKE, AKS, etc.): in this model, a user has the ability to say "I want a Kubernetes cluster" but the underlying cloud provider is responsible for provisioning the infrastructure and offering **limited view and access** of the underlying resources created on their behalf; similarly, Helm Project Operator allows a Project Owner to say "I want this Helm chart deployed", but the underlying Operator is responsible for "provisioning" (deploying) the Helm chart and offering **limited view and access** of the underlying Kubernetes resources created on their behalf (based on configuring "least-privilege" Kubernetes RBAC for the Project Owners / Members in the newly created Project Release Namespace). +2. Dynamically-provisioned Persistent Volumes: in this model, a single resource (PersistentVolume) exists that allows you to specify a Storage Class that actually implements provisioning the underlying storage via a Storage Class Provisioner (e.g. Longhorn). Similarly, the ProjectHelmChart exists that allows you to specify a `spec.helmApiVersion` ("storage class") that actually implements deploying the underlying Helm chart via a Helm Project Operator (e.g. [`rancher/prometheus-federator`](https://github.com/rancher/prometheus-federator)). + +### Configuring the Helm release created by a ProjectHelmChart + +The `spec.values` of this ProjectHelmChart resources will correspond to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf; to see the underlying chart's `values.yaml` spec, either: +- View to the chart's definition located at [`rancher/helm-project-operator` under `charts/project-operator-example`](https://github.com/rancher/helm-project-operator/blob/main/charts/project-operator-example) (where the chart version will be tied to the version of this operator) +- Look for the ConfigMap named `dummy.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that was used to configure the chart (which was embedded directly into the `helm-project-operator` binary). + +### Namespaces + +All Helm Project Operators have three different classifications of namespaces that the operator looks out for: +1. **Operator / System Namespace**: this is the namespace that the operator is deployed into (e.g. `cattle-helm-system`). This namespace will contain all HelmCharts and HelmReleases for all ProjectHelmCharts watched by this operator. **Only Cluster Admins should have access to this namespace.** +2. **Project Registration Namespace (`cattle-project-`)**: this is the set of namespaces that the operator watches for ProjectHelmCharts within. The RoleBindings and ClusterRoleBindings that apply to this namespace will also be the source of truth for the auto-assigned RBAC created in the Project Release Namespace (see more details below). **Project Owners (admin), Project Members (edit), and Read-Only Members (view) should have access to this namespace**. +> Note: Project Registration Namespaces will be auto-generated by the operator and imported into the Project it is tied to if `.Values.global.cattle.projectLabel` is provided (which is set to `field.cattle.io/projectId` by default); this indicates that a Project Registration Namespace should be created by the operator if at least one namespace is observed with that label. 
The operator will not let these namespaces be deleted unless either all namespaces with that label are gone (e.g. this is the last namespace in that project, in which case the namespace will be marked with the label `"helm.cattle.io/helm-project-operator-orphaned": "true"`, which signals that it can be deleted) or it is no longer watching that project (because the project ID was provided under `.Values.helmProjectOperator.otherSystemProjectLabelValues`, which serves as a denylist for Projects). These namespaces will also never be auto-deleted to avoid destroying user data; it is recommended that users clean up these namespaces manually if desired on creating or deleting a project +> Note: if `.Values.global.cattle.projectLabel` is not provided, the Operator / System Namespace will also be the Project Registration Namespace +3. **Project Release Namespace (`cattle-project--dummy`)**: this is the set of namespaces that the operator deploys Helm charts within on behalf of a ProjectHelmChart; the operator will also automatically assign RBAC to Roles created in this namespace by the Helm charts based on bindings found in the Project Registration Namespace. **Only Cluster Admins should have access to this namespace; Project Owners (admin), Project Members (edit), and Read-Only Members (view) will be assigned limited access to this namespace by the deployed Helm Chart and Helm Project Operator.** +> Note: Project Release Namespaces are automatically deployed and imported into the project whose ID is specified under `.Values.helmProjectOperator.projectReleaseNamespaces.labelValue` (which defaults to the value of `.Values.global.cattle.systemProjectId` if not specified) whenever a ProjectHelmChart is specified in a Project Registration Namespace +> Note: Project Release Namespaces follow the same orphaning conventions as Project Registration Namespaces (see note above) +> Note: if `.Values.projectReleaseNamespaces.enabled` is false, the Project Release Namespace will be the same as the Project Registration Namespace + +### Helm Resources (HelmChart, HelmRelease) + +On deploying a ProjectHelmChart, the Helm Project Operator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn: +- A HelmChart CR (managed via an embedded [k3s-io/helm-contoller](https://github.com/k3s-io/helm-controller) in the operator): this custom resource automatically creates a Job in the same namespace that triggers a `helm install`, `helm upgrade`, or `helm uninstall` depending on the change applied to the HelmChart CR; this CR is automatically updated on changes to the ProjectHelmChart (e.g. modifying the values.yaml) or changes to the underlying Project definition (e.g. adding or removing namespaces from a project). +> **Important Note: If a ProjectHelmChart is not deploying or updating the underlying Project Monitoring Stack for some reason, the Job created by this resource in the Operator / System namespace should be the first place you check to see if there's something wrong with the Helm operation; however, this is generally only accessible by a Cluster Admin.** +- A HelmRelease CR (managed via an embedded [rancher/helm-locker](https://github.com/rancher/helm-locker) in the operator): this custom resource automatically locks a deployed Helm release in place and automatically overwrites updates to underlying resources unless the change happens via a Helm operation (`helm install`, `helm upgrade`, or `helm uninstall` performed by the HelmChart CR). 
+ > Note: HelmRelease CRs emit Kubernetes Events that detect when an underlying Helm release is being modified and locks it back to place; to view these events, you can use `kubectl describe helmrelease -n `; you can also view the logs on this operator to see when changes are detected and which resources were attempted to be modified + +Both of these resources are created for all Helm charts in the Operator / System namespaces to avoid escalation of privileges to underprivileged users. + +### RBAC + +As described in the section on namespaces above, Helm Project Operator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings: +- ClusterRoleBindings +- RoleBindings in the Project Release Namespace + +On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the binding points to matches a ClusterRole with the name provided under `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin`, `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit`, or `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view`; by default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). + +> Note: for Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates. + +If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels: +- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}` +- `helm.cattle.io/project-helm-chart-role-aggregate-from: ` + +By default, the `project-operator-example` chart (the underlying chart deployed by Helm Project Operator) does not create any default roles; however, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control assigning those RBAC roles to users in their Project Registration namespaces. + +### Advanced Helm Project Operator Configuration + +|Value|Configuration| +|---|---------------------------| +|`valuesOverride`| Allows an Operator to override values that are set on each ProjectHelmChart deployment on an operator-level; user-provided options (specified on the `spec.values` of the ProjectHelmChart) are automatically overridden if operator-level values are provided.
For an example, see how the default value overrides `federate.targets` (note: when overriding list values like `federate.targets`, user-provided list values will **not** be concatenated) |
+|`projectReleaseNamespaces.labelValue`| The ID of the Project that all Project Release Namespaces should be auto-imported into (via label and annotation). Not recommended to be overridden on a Rancher setup. |
+|`otherSystemProjectLabelValues`| Other project label values whose namespaces the operator should treat as system namespaces that should not be monitored. By default, all namespaces that match `global.cattle.systemProjectId` will not be matched. `kube-system` is explicitly marked as a system namespace as well, regardless of label or annotation. |
+|`releaseRoleBindings.aggregate`| Whether to automatically create RBAC resources in Project Release namespaces |
+|`releaseRoleBindings.clusterRoleRefs.`| ClusterRoles to reference to discover subjects to create RoleBindings for in the Project Release Namespace for all corresponding Project Release Roles. See RBAC above for more information |
+|`hardenedNamespaces.enabled`| Whether to automatically patch the default ServiceAccount with `automountServiceAccountToken: false` and create a default NetworkPolicy in all managed namespaces in the cluster; the default values ensure that the creation of the namespace does not break a CIS 1.16 hardened scan |
+|`hardenedNamespaces.configuration`| The configuration to be supplied to the default ServiceAccount or auto-generated NetworkPolicy on managing a namespace |
+|`helmController.enabled`| Whether to enable an embedded k3s-io/helm-controller instance within the Helm Project Operator. Should be disabled for RKE2 clusters since RKE2 clusters already run Helm Controller to manage internal Kubernetes components |
+|`helmLocker.enabled`| Whether to enable an embedded rancher/helm-locker instance within the Helm Project Operator. |
diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/app-readme.md b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/app-readme.md
new file mode 100644
index 00000000..fd551467
--- /dev/null
+++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/app-readme.md
@@ -0,0 +1,20 @@
+# Helm Project Operator
+
+This chart installs the example [Helm Project Operator](https://github.com/rancher/helm-project-operator) onto your cluster.
+
+## Upgrading to Kubernetes v1.25+
+
+Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API.
+
+As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `global.cattle.psp.enabled` set to `false` if it has been previously set to `true`.
+
+> **Note:**
+> In this chart release, any previous fields that were associated with PSP resources have been removed in favor of a single global field: `global.cattle.psp.enabled`.
+
+> **Note:**
+> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).**
+>
+> If your charts get stuck in this state, please consult the Rancher docs on how to clean up your Helm release secrets.
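+
+As an illustrative sketch only (the release name `helm-project-operator`, the namespace `cattle-helm-system`, and the chart reference `<chart>` below are placeholders; substitute the values from your own installation), such an in-place upgrade could look like:
+
+```bash
+# Keep all existing values, but disable PSP resources before moving to Kubernetes v1.25+
+helm upgrade helm-project-operator <chart> \
+  --namespace cattle-helm-system \
+  --reuse-values \
+  --set global.cattle.psp.enabled=false
+```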
+Upon setting `global.cattle.psp.enabled` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. +​ +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Rancher docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. \ No newline at end of file diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/questions.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/questions.yaml new file mode 100644 index 00000000..054361a7 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/questions.yaml @@ -0,0 +1,43 @@ +questions: +- variable: global.cattle.psp.enabled + default: "false" + description: "Flag to enable or disable the installation of PodSecurityPolicies by this chart in the target cluster. If the cluster is running Kubernetes 1.25+, you must update this value to false." + label: "Enable PodSecurityPolicies" + type: boolean + group: "Security Settings" +- variable: helmController.enabled + label: Enable Embedded Helm Controller + description: 'Note: If you are running this chart in an RKE2 cluster, this should be disabled.' + type: boolean + group: Helm Controller +- variable: helmLocker.enabled + label: Enable Embedded Helm Locker + type: boolean + group: Helm Locker +- variable: projectReleaseNamespaces.labelValue + label: Project Release Namespace Project ID + description: By default, the System Project is selected. This can be overriden to a different Project (e.g. p-xxxxx) + type: string + required: false + group: Namespaces +- variable: releaseRoleBindings.clusterRoleRefs.admin + label: Admin ClusterRole + description: By default, admin selects Project Owners. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: admin + required: false + group: RBAC +- variable: releaseRoleBindings.clusterRoleRefs.edit + label: Edit ClusterRole + description: By default, edit selects Project Members. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: edit + required: false + group: RBAC +- variable: releaseRoleBindings.clusterRoleRefs.view + label: View ClusterRole + description: By default, view selects Read-Only users. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: view + required: false + group: RBAC diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/NOTES.txt b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/NOTES.txt new file mode 100644 index 00000000..32baeebc --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/NOTES.txt @@ -0,0 +1,2 @@ +{{ $.Chart.Name }} has been installed. Check its status by running: + kubectl --namespace {{ template "helm-project-operator.namespace" . 
}} get pods -l "release={{ $.Release.Name }}" diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/_helpers.tpl b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/_helpers.tpl new file mode 100644 index 00000000..97dd6b36 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/_helpers.tpl @@ -0,0 +1,66 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# Helm Project Operator + +{{/* vim: set filetype=mustache: */}} +{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}} +{{- define "helm-project-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "helm-project-operator.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* Create chart name and version as used by the chart label. */}} +{{- define "helm-project-operator.chartref" -}} +{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end }} + +{{/* Generate basic labels */}} +{{- define "helm-project-operator.labels" -}} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}" +app.kubernetes.io/part-of: {{ template "helm-project-operator.name" . }} +chart: {{ template "helm-project-operator.chartref" . }} +release: {{ $.Release.Name | quote }} +heritage: {{ $.Release.Service | quote }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/cleanup.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/cleanup.yaml new file mode 100644 index 00000000..98675642 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/cleanup.yaml @@ -0,0 +1,82 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "helm-project-operator.name" . }}-cleanup + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed +spec: + template: + metadata: + name: {{ template "helm-project-operator.name" . }}-cleanup + labels: {{ include "helm-project-operator.labels" . | nindent 8 }} + app: {{ template "helm-project-operator.name" . 
}} + spec: + serviceAccountName: {{ template "helm-project-operator.name" . }} +{{- if .Values.cleanup.securityContext }} + securityContext: {{ toYaml .Values.cleanup.securityContext | nindent 8 }} +{{- end }} + initContainers: + - name: add-cleanup-annotations + image: {{ template "system_default_registry" . }}{{ .Values.cleanup.image.repository }}:{{ .Values.cleanup.image.tag }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + command: + - /bin/sh + - -c + - > + echo "Labeling all ProjectHelmCharts with helm.cattle.io/helm-project-operator-cleanup=true"; + EXPECTED_HELM_API_VERSION={{ .Values.helmApiVersion }}; + IFS=$'\n'; + for namespace in $(kubectl get namespaces -l helm.cattle.io/helm-project-operated=true --no-headers -o=custom-columns=NAME:.metadata.name); do + for projectHelmChartAndHelmApiVersion in $(kubectl get projecthelmcharts -n ${namespace} --no-headers -o=custom-columns=NAME:.metadata.name,HELMAPIVERSION:.spec.helmApiVersion); do + projectHelmChartAndHelmApiVersion=$(echo ${projectHelmChartAndHelmApiVersion} | xargs); + projectHelmChart=$(echo ${projectHelmChartAndHelmApiVersion} | cut -d' ' -f1); + helmApiVersion=$(echo ${projectHelmChartAndHelmApiVersion} | cut -d' ' -f2); + if [[ ${helmApiVersion} != ${EXPECTED_HELM_API_VERSION} ]]; then + echo "Skipping marking ${namespace}/${projectHelmChart} with cleanup annotation since spec.helmApiVersion: ${helmApiVersion} is not ${EXPECTED_HELM_API_VERSION}"; + continue; + fi; + kubectl label projecthelmcharts -n ${namespace} ${projectHelmChart} helm.cattle.io/helm-project-operator-cleanup=true --overwrite; + done; + done; +{{- if .Values.cleanup.resources }} + resources: {{ toYaml .Values.cleanup.resources | nindent 12 }} +{{- end }} +{{- if .Values.cleanup.containerSecurityContext }} + securityContext: {{ toYaml .Values.cleanup.containerSecurityContext | nindent 12 }} +{{- end }} + containers: + - name: ensure-subresources-deleted + image: {{ template "system_default_registry" . }}{{ .Values.cleanup.image.repository }}:{{ .Values.cleanup.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - > + SYSTEM_NAMESPACE={{ .Release.Namespace }} + EXPECTED_HELM_API_VERSION={{ .Values.helmApiVersion }}; + HELM_API_VERSION_TRUNCATED=$(echo ${EXPECTED_HELM_API_VERSION} | cut -d'/' -f0); + echo "Ensuring HelmCharts and HelmReleases are deleted from ${SYSTEM_NAMESPACE}..."; + while [[ "$(kubectl get helmcharts,helmreleases -l helm.cattle.io/helm-api-version=${HELM_API_VERSION_TRUNCATED} -n ${SYSTEM_NAMESPACE} 2>&1)" != "No resources found in ${SYSTEM_NAMESPACE} namespace." ]]; do + echo "waiting for HelmCharts and HelmReleases to be deleted from ${SYSTEM_NAMESPACE}... sleeping 3 seconds"; + sleep 3; + done; + echo "Successfully deleted all HelmCharts and HelmReleases in ${SYSTEM_NAMESPACE}!"; +{{- if .Values.cleanup.resources }} + resources: {{ toYaml .Values.cleanup.resources | nindent 12 }} +{{- end }} +{{- if .Values.cleanup.containerSecurityContext }} + securityContext: {{ toYaml .Values.cleanup.containerSecurityContext | nindent 12 }} +{{- end }} + restartPolicy: OnFailure + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + {{- if .Values.cleanup.nodeSelector }} + {{- toYaml .Values.cleanup.nodeSelector | nindent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} + {{- if .Values.cleanup.tolerations }} + {{- toYaml .Values.cleanup.tolerations | nindent 8 }} + {{- end }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/clusterrole.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/clusterrole.yaml new file mode 100644 index 00000000..60ed263b --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/clusterrole.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "helm-project-operator.name" . }}-admin + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-admin: "true" + {{- end }} +rules: +- apiGroups: + - helm.cattle.io + resources: + - projecthelmcharts + - projecthelmcharts/finalizers + - projecthelmcharts/status + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "helm-project-operator.name" . }}-edit + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-edit: "true" + {{- end }} +rules: +- apiGroups: + - helm.cattle.io + resources: + - projecthelmcharts + - projecthelmcharts/status + verbs: + - 'get' + - 'list' + - 'watch' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "helm-project-operator.name" . }}-view + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + {{- end }} +rules: +- apiGroups: + - helm.cattle.io + resources: + - projecthelmcharts + - projecthelmcharts/status + verbs: + - 'get' + - 'list' + - 'watch' +{{- end }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/configmap.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/configmap.yaml new file mode 100644 index 00000000..d4def157 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/configmap.yaml @@ -0,0 +1,14 @@ +## Note: If you add another entry to this ConfigMap, make sure a corresponding env var is set +## in the deployment of the operator to ensure that a Helm upgrade will force the operator +## to reload the values in the ConfigMap and redeploy +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "helm-project-operator.name" . }}-config + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} +data: + hardened.yaml: |- +{{ .Values.hardenedNamespaces.configuration | toYaml | indent 4 }} + values.yaml: |- +{{ .Values.valuesOverride | toYaml | indent 4 }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/deployment.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/deployment.yaml new file mode 100644 index 00000000..33b81e72 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/deployment.yaml @@ -0,0 +1,126 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "helm-project-operator.name" . 
}} + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +spec: + {{- if .Values.replicas }} + replicas: {{ .Values.replicas }} + {{- end }} + selector: + matchLabels: + app: {{ template "helm-project-operator.name" . }} + release: {{ $.Release.Name | quote }} + template: + metadata: + labels: {{ include "helm-project-operator.labels" . | nindent 8 }} + app: {{ template "helm-project-operator.name" . }} + spec: + containers: + - name: {{ template "helm-project-operator.name" . }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + args: + - {{ template "helm-project-operator.name" . }} + - --namespace={{ template "helm-project-operator.namespace" . }} + - --controller-name={{ template "helm-project-operator.name" . }} + - --values-override-file=/etc/helmprojectoperator/config/values.yaml +{{- if .Values.global.cattle.systemDefaultRegistry }} + - --system-default-registry={{ .Values.global.cattle.systemDefaultRegistry }} +{{- end }} +{{- if .Values.global.cattle.url }} + - --cattle-url={{ .Values.global.cattle.url }} +{{- end }} +{{- if .Values.global.cattle.projectLabel }} + - --project-label={{ .Values.global.cattle.projectLabel }} +{{- end }} +{{- if not .Values.projectReleaseNamespaces.enabled }} + - --system-project-label-values={{ join "," (append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId) }} +{{- else if and (ne (len .Values.global.cattle.systemProjectId) 0) (ne (len .Values.projectReleaseNamespaces.labelValue) 0) (ne .Values.projectReleaseNamespaces.labelValue .Values.global.cattle.systemProjectId) }} + - --system-project-label-values={{ join "," (append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId) }} +{{- else if len .Values.otherSystemProjectLabelValues }} + - --system-project-label-values={{ join "," .Values.otherSystemProjectLabelValues }} +{{- end }} +{{- if .Values.projectReleaseNamespaces.enabled }} +{{- if .Values.projectReleaseNamespaces.labelValue }} + - --project-release-label-value={{ .Values.projectReleaseNamespaces.labelValue }} +{{- else if .Values.global.cattle.systemProjectId }} + - --project-release-label-value={{ .Values.global.cattle.systemProjectId }} +{{- end }} +{{- end }} +{{- if .Values.global.cattle.clusterId }} + - --cluster-id={{ .Values.global.cattle.clusterId }} +{{- end }} +{{- if .Values.releaseRoleBindings.aggregate }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs.admin }} + - --admin-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.admin }} +{{- end }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs.edit }} + - --edit-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.edit }} +{{- end }} +{{- if .Values.releaseRoleBindings.clusterRoleRefs.view }} + - --view-cluster-role={{ .Values.releaseRoleBindings.clusterRoleRefs.view }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.hardenedNamespaces.enabled }} + - --hardening-options-file=/etc/helmprojectoperator/config/hardening.yaml +{{- else }} + - --disable-hardening +{{- end }} +{{- if .Values.debug }} + - --debug + - --debug-level={{ .Values.debugLevel }} +{{- end }} +{{- if not .Values.helmController.enabled }} + - --disable-embedded-helm-controller +{{- else }} + - --helm-job-image={{ template 
"system_default_registry" . }}{{ .Values.helmController.job.image.repository }}:{{ .Values.helmController.job.image.tag }} +{{- end }} +{{- if not .Values.helmLocker.enabled }} + - --disable-embedded-helm-locker +{{- end }} +{{- if .Values.additionalArgs }} +{{- toYaml .Values.additionalArgs | nindent 10 }} +{{- end }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ## Note: The below two values only exist to force Helm to upgrade the deployment on + ## a change to the contents of the ConfigMap during an upgrade. Neither serve + ## any practical purpose and can be removed and replaced with a configmap reloader + ## in a future change if dynamic updates are required. + - name: HARDENING_OPTIONS_SHA_256_HASH + value: {{ .Values.hardenedNamespaces.configuration | toYaml | sha256sum }} + - name: VALUES_OVERRIDE_SHA_256_HASH + value: {{ .Values.valuesOverride | toYaml | sha256sum }} +{{- if .Values.resources }} + resources: {{ toYaml .Values.resources | nindent 12 }} +{{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: {{ toYaml .Values.containerSecurityContext | nindent 12 }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/helmprojectoperator/config" + serviceAccountName: {{ template "helm-project-operator.name" . }} +{{- if .Values.securityContext }} + securityContext: {{ toYaml .Values.securityContext | nindent 8 }} +{{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.nodeSelector }} +{{- toYaml .Values.nodeSelector | nindent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.tolerations }} +{{- toYaml .Values.tolerations | nindent 8 }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "helm-project-operator.name" . }}-config diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/psp.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/psp.yaml new file mode 100644 index 00000000..73dcc456 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/psp.yaml @@ -0,0 +1,68 @@ +{{- if .Values.global.cattle.psp.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "helm-project-operator.name" . }}-psp + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +{{- if .Values.global.rbac.pspAnnotations }} + annotations: {{ toYaml .Values.global.rbac.pspAnnotations | nindent 4 }} +{{- end }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + readOnlyRootFilesystem: false +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "helm-project-operator.name" . }}-psp + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . 
}} +rules: +{{- if semverCompare "> 1.15.0-0" .Capabilities.KubeVersion.GitVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "helm-project-operator.name" . }}-psp +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "helm-project-operator.name" . }}-psp + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "helm-project-operator.name" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . }} +{{- end }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/rbac.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/rbac.yaml new file mode 100644 index 00000000..b1c40920 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/rbac.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "helm-project-operator.name" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "cluster-admin" # see note below +subjects: +- kind: ServiceAccount + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "helm-project-operator.name" . }} + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} + app: {{ template "helm-project-operator.name" . }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }} +{{- end }} +# --- +# NOTE: +# As of now, due to the fact that the k3s-io/helm-controller can only deploy jobs that are cluster-bound to the cluster-admin +# ClusterRole, the only way for this operator to be able to perform that binding is if it is also bound to the cluster-admin ClusterRole. +# +# As a result, this ClusterRoleBinding will be left as a work-in-progress until changes are made in k3s-io/helm-controller to allow us to grant +# only scoped down permissions to the Job that is deployed. diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/system-namespaces-configmap.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/system-namespaces-configmap.yaml new file mode 100644 index 00000000..f4c85254 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/system-namespaces-configmap.yaml @@ -0,0 +1,62 @@ +{{- if .Values.systemNamespacesConfigMap.create }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "helm-project-operator.name" . }}-system-namespaces + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . 
| nindent 4 }} +data: + system-namespaces.json: |- + { +{{- if .Values.projectReleaseNamespaces.enabled }} +{{- if .Values.projectReleaseNamespaces.labelValue }} + "projectReleaseLabelValue": {{ .Values.projectReleaseNamespaces.labelValue | quote }}, +{{- else if .Values.global.cattle.systemProjectId }} + "projectReleaseLabelValue": {{ .Values.global.cattle.systemProjectId | quote }}, +{{- else }} + "projectReleaseLabelValue": "", +{{- end }} +{{- else }} + "projectReleaseLabelValue": "", +{{- end }} +{{- if not .Values.projectReleaseNamespaces.enabled }} + "systemProjectLabelValues": {{ append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId | toJson }} +{{- else if and (ne (len .Values.global.cattle.systemProjectId) 0) (ne (len .Values.projectReleaseNamespaces.labelValue) 0) (ne .Values.projectReleaseNamespaces.labelValue .Values.global.cattle.systemProjectId) }} + "systemProjectLabelValues": {{ append .Values.otherSystemProjectLabelValues .Values.global.cattle.systemProjectId | toJson }} +{{- else if len .Values.otherSystemProjectLabelValues }} + "systemProjectLabelValues": {{ .Values.otherSystemProjectLabelValues | toJson }} +{{- else }} + "systemProjectLabelValues": [] +{{- end }} + } +--- +{{- if (and .Values.systemNamespacesConfigMap.rbac.enabled .Values.systemNamespacesConfigMap.rbac.subjects) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "helm-project-operator.name" . }}-system-namespaces + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - "{{ template "helm-project-operator.name" . }}-system-namespaces" + verbs: + - 'get' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "helm-project-operator.name" . }}-system-namespaces + namespace: {{ template "helm-project-operator.namespace" . }} + labels: {{ include "helm-project-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "helm-project-operator.name" . }}-system-namespaces +subjects: {{ .Values.systemNamespacesConfigMap.rbac.subjects | toYaml | nindent 2 }} +{{- end }} +{{- end }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/validate-psp-install.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/validate-psp-install.yaml new file mode 100644 index 00000000..a30c59d3 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/templates/validate-psp-install.yaml @@ -0,0 +1,7 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +#{{- if .Values.global.cattle.psp.enabled }} +#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}} +#{{- end }} +#{{- end }} +#{{- end }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/values.yaml b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/values.yaml new file mode 100644 index 00000000..796bf5f1 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/charts/helmProjectOperator/values.yaml @@ -0,0 +1,228 @@ +# Default values for helm-project-operator. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +# Helm Project Operator Configuration + +global: + cattle: + clusterId: "" + psp: + enabled: false + projectLabel: field.cattle.io/projectId + systemDefaultRegistry: "" + systemProjectId: "" + url: "" + rbac: + ## Create RBAC resources for ServiceAccounts and users + ## + create: true + + userRoles: + ## Create default user ClusterRoles to allow users to interact with ProjectHelmCharts + create: true + ## Aggregate default user ClusterRoles into default k8s ClusterRoles + aggregateToDefaultRoles: true + + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + +helmApiVersion: dummy.cattle.io/v1alpha1 + +## valuesOverride overrides values that are set on each ProjectHelmChart deployment on an operator-level +## User-provided values will be overwritten based on the values provided here +valuesOverride: {} + +## projectReleaseNamespaces are auto-generated namespaces that are created to host Helm Releases +## managed by this operator on behalf of a ProjectHelmChart +projectReleaseNamespaces: + ## Enabled determines whether Project Release Namespaces should be created. If false, the underlying + ## Helm release will be deployed in the Project Registration Namespace + enabled: true + ## labelValue is the value of the Project that the projectReleaseNamespace should be created within + ## If empty, this will be set to the value of global.cattle.systemProjectId + ## If global.cattle.systemProjectId is also empty, project release namespaces will be disabled + labelValue: "" + +## otherSystemProjectLabelValues are project labels that identify namespaces as those that should be treated as system projects +## i.e. they will be entirely ignored by the operator +## By default, the global.cattle.systemProjectId will be in this list +otherSystemProjectLabelValues: [] + +## releaseRoleBindings configures RoleBindings automatically created by the Helm Project Operator +## in Project Release Namespaces where underlying Helm charts are deployed +releaseRoleBindings: + ## aggregate enables creating these RoleBindings off aggregating RoleBindings in the + ## Project Registration Namespace or ClusterRoleBindings that bind users to the ClusterRoles + ## specified under clusterRoleRefs + aggregate: true + + ## clusterRoleRefs are the ClusterRoles whose RoleBinding or ClusterRoleBindings should determine + ## the RoleBindings created in the Project Release Namespace + ## + ## By default, these are set to create RoleBindings based on the RoleBindings / ClusterRoleBindings + ## attached to the default K8s user-facing ClusterRoles of admin, edit, and view. 
+ ## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + ## + clusterRoleRefs: + admin: admin + edit: edit + view: view + +hardenedNamespaces: + # Whether to automatically manage the configuration of the default ServiceAccount and + # auto-create a NetworkPolicy for each namespace created by this operator + enabled: true + + configuration: + # Values to be applied to each default ServiceAccount created in a managed namespace + serviceAccountSpec: + secrets: [] + imagePullSecrets: [] + automountServiceAccountToken: false + # Values to be applied to each default generated NetworkPolicy created in a managed namespace + networkPolicySpec: + podSelector: {} + egress: [] + ingress: [] + policyTypes: ["Ingress", "Egress"] + +## systemNamespacesConfigMap is a ConfigMap created to allow users to see valid entries +## for registering a ProjectHelmChart for a given Project on the Rancher Dashboard UI. +## It does not need to be enabled for a non-Rancher use case. +systemNamespacesConfigMap: + ## Create indicates whether the system namespaces configmap should be created + ## This is a required value for integration with Rancher Dashboard + create: true + + ## RBAC provides options around the RBAC created to allow users to be able to view + ## the systemNamespacesConfigMap; if not specified, only users with the ability to + ## view ConfigMaps in the namespace where this chart is deployed will be able to + ## properly view the system namespaces on the Rancher Dashboard UI + rbac: + ## enabled indicates that we should deploy a RoleBinding and Role to view this ConfigMap + enabled: true + ## subjects are the subjects that should be bound to this default RoleBinding + ## By default, we allow anyone who is authenticated to the system to be able to view + ## this ConfigMap in the deployment namespace + subjects: + - kind: Group + name: system:authenticated + +nameOverride: "" + +namespaceOverride: "" + +replicas: 1 + +image: + repository: ghcr.io/rancher/helm-project-operator + tag: v0.3.0 + pullPolicy: IfNotPresent + +helmController: + # Note: should be disabled for RKE2 clusters since they already run Helm Controller to manage internal Kubernetes components + enabled: true + + job: + image: + repository: rancher/klipper-helm + tag: v0.7.0-build20220315 + +helmLocker: + enabled: true + +# Additional arguments to be passed into the Helm Project Operator image +additionalArgs: [] + +## Define which Nodes the Pods are scheduled on. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for use with node taints +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" + +resources: {} + # limits: + # memory: 500Mi + # cpu: 1000m + # requests: + # memory: 100Mi + # cpu: 100m + +containerSecurityContext: {} + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # privileged: false + # readOnlyRootFilesystem: true + +securityContext: {} + # runAsGroup: 1000 + # runAsUser: 1000 + # supplementalGroups: + # - 1000 + +debug: false +debugLevel: 0 + +cleanup: + image: + repository: rancher/shell + tag: v0.1.19 + pullPolicy: IfNotPresent + + ## Define which Nodes the Pods are scheduled on. 
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + containerSecurityContext: {} + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # privileged: false + # readOnlyRootFilesystem: true + + securityContext: + runAsNonRoot: false + runAsUser: 0 + + resources: {} + # limits: + # memory: 500Mi + # cpu: 1000m + # requests: + # memory: 100Mi + # cpu: 100m diff --git a/charts/prometheus-federator/0.4.3-rc.1/questions.yaml b/charts/prometheus-federator/0.4.3-rc.1/questions.yaml new file mode 100644 index 00000000..87cf1339 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/questions.yaml @@ -0,0 +1,43 @@ +questions: +- variable: global.cattle.psp.enabled + default: "false" + description: "Flag to enable or disable the installation of PodSecurityPolicies by this chart in the target cluster. If the cluster is running Kubernetes 1.25+, you must update this value to false." + label: "Enable PodSecurityPolicies" + type: boolean + group: "Security Settings" +- variable: helmProjectOperator.helmController.enabled + label: Enable Embedded Helm Controller + description: 'Note: If you are running Prometheus Federator in an RKE2 / K3s cluster before v1.23.14 / v1.24.8 / v1.25.4, this should be disabled.' + type: boolean + group: Helm Controller +- variable: helmProjectOperator.helmLocker.enabled + label: Enable Embedded Helm Locker + type: boolean + group: Helm Locker +- variable: helmProjectOperator.projectReleaseNamespaces.labelValue + label: Project Release Namespace Project ID + description: By default, the System Project is selected. This can be overriden to a different Project (e.g. p-xxxxx) + type: string + required: false + group: Namespaces +- variable: helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin + label: Admin ClusterRole + description: By default, admin selects Project Owners. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: admin + required: false + group: RBAC +- variable: helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit + label: Edit ClusterRole + description: By default, edit selects Project Members. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: edit + required: false + group: RBAC +- variable: helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view + label: View ClusterRole + description: By default, view selects Read-Only users. This can be overridden to a different ClusterRole (e.g. rt-xxxxx) + type: string + default: view + required: false + group: RBAC \ No newline at end of file diff --git a/charts/prometheus-federator/0.4.3-rc.1/templates/NOTES.txt b/charts/prometheus-federator/0.4.3-rc.1/templates/NOTES.txt new file mode 100644 index 00000000..f551f366 --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/templates/NOTES.txt @@ -0,0 +1,3 @@ +{{ $.Chart.Name }} has been installed. Check its status by running: + kubectl --namespace {{ template "prometheus-federator.namespace" . 
}} get pods -l "release={{ $.Release.Name }}" + diff --git a/charts/prometheus-federator/0.4.3-rc.1/templates/_helpers.tpl b/charts/prometheus-federator/0.4.3-rc.1/templates/_helpers.tpl new file mode 100644 index 00000000..15ea4e5c --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/templates/_helpers.tpl @@ -0,0 +1,66 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# Helm Project Operator + +{{/* vim: set filetype=mustache: */}} +{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}} +{{- define "prometheus-federator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "prometheus-federator.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* Create chart name and version as used by the chart label. */}} +{{- define "prometheus-federator.chartref" -}} +{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end }} + +{{/* Generate basic labels */}} +{{- define "prometheus-federator.labels" }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}" +app.kubernetes.io/part-of: {{ template "prometheus-federator.name" . }} +chart: {{ template "prometheus-federator.chartref" . }} +release: {{ $.Release.Name | quote }} +heritage: {{ $.Release.Service | quote }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end }} diff --git a/charts/prometheus-federator/0.4.3-rc.1/values.yaml b/charts/prometheus-federator/0.4.3-rc.1/values.yaml new file mode 100644 index 00000000..3d04e3ee --- /dev/null +++ b/charts/prometheus-federator/0.4.3-rc.1/values.yaml @@ -0,0 +1,92 @@ +# Default values for helm-project-operator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Prometheus Federator Configuration + +global: + cattle: + psp: + enabled: false + systemDefaultRegistry: "" + projectLabel: field.cattle.io/projectId + clusterId: "" + systemProjectId: "" + url: "" + rbac: + pspEnabled: true + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + +helmProjectOperator: + # ensures that all resources created by subchart show up as prometheus-federator + helmApiVersion: monitoring.cattle.io/v1alpha1 + + nameOverride: prometheus-federator + + helmController: + # Note: should be disabled for RKE2 clusters since they already run Helm Controller to manage internal Kubernetes components + enabled: true + + helmLocker: + enabled: true + + ## valuesOverride overrides values that are set on each Project Prometheus Stack Helm Chart deployment on an operator level + ## all values provided here will override any user-provided values automatically + valuesOverride: + + federate: + # Change this to point at all Prometheuses you want all your Project Prometheus Stacks to federate from + # By default, this matches the default deployment of Rancher Monitoring + targets: + - rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090 + + image: + repository: rancher/prometheus-federator + tag: v0.4.3-rc.1 + pullPolicy: IfNotPresent + + # Additional arguments to be passed into the Prometheus Federator image + additionalArgs: [] + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + resources: {} + # limits: + # memory: 500Mi + # cpu: 1000m + # requests: + # memory: 100Mi + # cpu: 100m + + securityContext: {} + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + + debug: false + debugLevel: 0 \ No newline at end of file diff --git a/docs/developing.md b/docs/developing.md index 0800b763..04d743cf 100644 --- a/docs/developing.md +++ b/docs/developing.md @@ -102,7 +102,7 @@ If you don't want to run all the steps in CI every time you make a change, you c REPO= TAG= -./scripts/build-chart && GOOS=linux CGO_ENABLED=0 go build -ldflags "-extldflags -static -s" -o bin/prometheus-federator && REPO=${REPO} TAG=${TAG} make package +./scripts/build-chart && GOOS=linux CGO_ENABLED=0 go build -ldflags "-extldflags -static -s" -o build/bin/prometheus-federator && REPO=${REPO} TAG=${TAG} make package ``` Once the image is successfully packaged, simply run `docker push ${REPO}/prometheus-federator:${TAG}` to push your image to your Docker repository. 
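For instance, a minimal end-to-end sketch (assuming `myorg` and `dev` as placeholder values for `REPO` and `TAG`, and that you are already logged in to the target registry):

```bash
# Placeholder repository and tag; substitute your own
export REPO=myorg
export TAG=dev

# Build the embedded chart and binary, then package the image (same steps as above)
./scripts/build-chart && GOOS=linux CGO_ENABLED=0 go build -ldflags "-extldflags -static -s" -o build/bin/prometheus-federator && REPO=${REPO} TAG=${TAG} make package

# Push the resulting image to your repository
docker push ${REPO}/prometheus-federator:${TAG}
```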
diff --git a/examples/ci/namespace.yaml b/examples/ci/namespace.yaml new file mode 100644 index 00000000..ef8eb7ec --- /dev/null +++ b/examples/ci/namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + field.cattle.io/projectId: local:p-example + labels: + field.cattle.io/projectId: p-example + name: e2e-prometheus-federator diff --git a/examples/ci-example.yaml b/examples/ci/project-helm-chart.yaml similarity index 100% rename from examples/ci-example.yaml rename to examples/ci/project-helm-chart.yaml diff --git a/examples/ci/project.yaml b/examples/ci/project.yaml new file mode 100644 index 00000000..4f4f3a37 --- /dev/null +++ b/examples/ci/project.yaml @@ -0,0 +1,8 @@ +apiVersion: management.cattle.io/v3 +kind: Project +metadata: + name: p-example + namespace: local +spec: + clusterName: local + displayName: PromFed Example diff --git a/examples/example.yaml b/examples/project-helm-chart.yaml similarity index 100% rename from examples/example.yaml rename to examples/project-helm-chart.yaml diff --git a/go.mod b/go.mod index bff5b48c..c95c1a52 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ replace ( ) require ( - github.com/rancher/helm-project-operator v0.2.1 + github.com/rancher/helm-project-operator v0.3.0 github.com/rancher/wrangler v1.0.2 github.com/rancher/wrangler-cli v0.0.0-20220624114648-479c5692ba22 github.com/spf13/cobra v1.6.1 @@ -62,7 +62,6 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.39.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect - github.com/rancher/helm-locker v0.0.1 // indirect github.com/rancher/lasso v0.0.0-20221227210133-6ea88ca2fbcc // indirect github.com/rubenv/sql-migrate v1.3.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect diff --git a/go.sum b/go.sum index 3e514a8c..6c3162e9 100644 --- a/go.sum +++ b/go.sum @@ -338,7 +338,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= @@ -368,10 +368,8 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rancher/client-go v1.25.4-rancher1 h1:9MlBC8QbgngUkhNzMR8rZmmCIj6WNRHFOnYiwC2Kty4= github.com/rancher/client-go v1.25.4-rancher1/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= -github.com/rancher/helm-locker v0.0.1 h1:v/m7Uu5wGivn+FQn5/xMuUG2L+CzocSzD7sjM7+/74E= -github.com/rancher/helm-locker v0.0.1/go.mod h1:PRThM9wL4o7MXJwUDeAk/+9s1vpmbRbacnGm+HoGqbY= -github.com/rancher/helm-project-operator v0.2.1 h1:EYqqyYgOOqCsNBAzTub9nJmjkmspfhLhO2u8iHPDLM4= -github.com/rancher/helm-project-operator v0.2.1/go.mod h1:8z5cRg/aOcxpIUWhLoW/bBUtxR8coJSnZT26k3bZa+g= +github.com/rancher/helm-project-operator v0.3.0 
h1:9JtzgQVUnwgr9btUNFybPUuChAKOL3ye6855ATAtQyM= +github.com/rancher/helm-project-operator v0.3.0/go.mod h1:HkQq2yAWVGoZ0Q6jUlNTJaI2J8mar/PF8Ur2PN9nmYY= github.com/rancher/lasso v0.0.0-20221227210133-6ea88ca2fbcc h1:29VHrInLV4qSevvcvhBj5UhQWkPShxrxv4AahYg2Scw= github.com/rancher/lasso v0.0.0-20221227210133-6ea88ca2fbcc/go.mod h1:dEfC9eFQigj95lv/JQ8K5e7+qQCacWs1aIA6nLxKzT8= github.com/rancher/wrangler v1.0.2 h1:0JGv62gF2OkYUoR0fsr99Za63fquFeKTHE2z9kAFVsE= diff --git a/index.yaml b/index.yaml index 36b0ef06..97db9f39 100755 --- a/index.yaml +++ b/index.yaml @@ -1,6 +1,28 @@ apiVersion: v1 entries: prometheus-federator: + - annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Prometheus Federator + catalog.cattle.io/namespace: cattle-monitoring-system + catalog.cattle.io/os: linux,windows + catalog.cattle.io/permits-os: linux,windows + catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1 + catalog.cattle.io/release-name: prometheus-federator + apiVersion: v2 + appVersion: 0.4.3-rc.1 + created: "2024-10-07T13:17:48.590665-04:00" + dependencies: + - name: helmProjectOperator + repository: file://./charts/helmProjectOperator + version: 0.3.1 + description: Prometheus Federator + digest: 914d6606b938383710b923174e2505231fea144da555aa88119c7e3c88209c01 + icon: https://raw.githubusercontent.com/rancher/prometheus-federator/main/assets/logos/prometheus-federator.svg + name: prometheus-federator + urls: + - assets/prometheus-federator/prometheus-federator-0.4.3-rc.1.tgz + version: 0.4.3-rc.1 - annotations: catalog.cattle.io/certified: rancher catalog.cattle.io/display-name: Prometheus Federator diff --git a/main.go b/main.go index a17a0e21..81265b34 100644 --- a/main.go +++ b/main.go @@ -29,7 +29,7 @@ var ( // SystemNamespaces is the system namespaces scoped for the embedded monitoring chart (rancher-project-monitoring) SystemNamespaces = []string{"kube-system", "cattle-monitoring-system", "cattle-dashboards"} - //go:embed bin/rancher-project-monitoring/rancher-project-monitoring.tgz.base64 + //go:embed build/chart/rancher-project-monitoring.tgz.base64 base64TgzChart string debugConfig command.DebugConfig diff --git a/package/Dockerfile b/package/Dockerfile index d6997751..46fbc9c7 100644 --- a/package/Dockerfile +++ b/package/Dockerfile @@ -1,23 +1,45 @@ -FROM registry.suse.com/bci/golang:1.22 AS helm -RUN zypper -n install git -RUN git -C / clone --branch release-v3.9.0 --depth=1 https://github.com/rancher/helm +# Image that provides cross compilation tooling. +FROM --platform=$BUILDPLATFORM rancher/mirrored-tonistiigi-xx:1.3.0 AS xx + +FROM --platform=$BUILDPLATFORM registry.suse.com/bci/golang:1.22 AS helm + +# Clone repository once, and reuse it for target archs. +ARG HELM_VERSION=release-v3.9.0 +ADD --keep-git-dir=true https://github.com/rancher/helm.git#${HELM_VERSION} /helm +RUN cd /helm && go mod download + +COPY --from=xx / / + +# Cross-compile instead of emulating the compilation on the target arch. 
+ARG TARGETPLATFORM
+RUN xx-go --wrap && mkdir -p /run/lock
 RUN make -C /helm
-FROM registry.suse.com/bci/golang:1.22 as builder
+RUN xx-verify --static /helm/bin/helm
+
+FROM registry.suse.com/bci/golang:1.22 AS builder
+
+# Allow chart version config
+ARG TARGETPLATFORM
+ARG EMBEDED_CHART_VERSION=0.3.4
+ARG TAG=''
+ARG REPO=''
+ENV EMBEDED_CHART_VERSION=$EMBEDED_CHART_VERSION TAG=$TAG REPO=$REPO
+
 WORKDIR /usr/src/app
 COPY --from=helm ./helm/bin/helm /usr/local/bin/
 RUN zypper -n install git vim less file curl wget patch
 COPY go.mod go.sum ./
 RUN go mod download
 COPY . .
-RUN make build
+RUN ./scripts/build
 
-FROM registry.suse.com/bci/bci-micro:15.5
+FROM registry.suse.com/bci/bci-micro:latest
 RUN echo 'prometheus:x:1000:1000::/home/prometheus:/bin/bash' >> /etc/passwd && \
     echo 'prometheus:x:1000:' >> /etc/group && \
     mkdir /home/prometheus && \
     chown -R prometheus:prometheus /home/prometheus
 COPY --from=helm ./helm/bin/helm /usr/local/bin/
-COPY --from=builder /usr/src/app/bin/prometheus-federator /usr/bin/
+COPY --from=builder /usr/src/app/build/bin/prometheus-federator /usr/bin/
 USER prometheus
 CMD ["prometheus-federator"]
diff --git a/packages/prometheus-federator/charts/Chart.yaml b/packages/prometheus-federator/charts/Chart.yaml
index b7f2297a..eea5005c 100755
--- a/packages/prometheus-federator/charts/Chart.yaml
+++ b/packages/prometheus-federator/charts/Chart.yaml
@@ -7,13 +7,12 @@ annotations:
   catalog.cattle.io/provides-gvr: helm.cattle.io.projecthelmchart/v1alpha1
   catalog.cattle.io/release-name: prometheus-federator
 apiVersion: v2
-appVersion: 0.3.5
+appVersion: 0.4.3-rc.1
 dependencies:
-- condition: helmProjectOperator.enabled
-  name: helmProjectOperator
+- name: helmProjectOperator
   repository: file://./charts/helmProjectOperator
-  version: 0.2.1
+  version: 0.3.1
 description: Prometheus Federator
 icon: https://raw.githubusercontent.com/rancher/prometheus-federator/main/assets/logos/prometheus-federator.svg
 name: prometheus-federator
-version: 0.4.2
+version: 0.4.3-rc.1
diff --git a/packages/prometheus-federator/charts/values.yaml b/packages/prometheus-federator/charts/values.yaml
index 103c1604..3d04e3ee 100755
--- a/packages/prometheus-federator/charts/values.yaml
+++ b/packages/prometheus-federator/charts/values.yaml
@@ -32,8 +32,6 @@ global:
 # - name: "image-pull-secret"
 
 helmProjectOperator:
-  enabled: true
-
   # ensures that all resources created by subchart show up as prometheus-federator
   helmApiVersion: monitoring.cattle.io/v1alpha1
 
@@ -58,7 +56,7 @@ helmProjectOperator:
 
   image:
     repository: rancher/prometheus-federator
-    tag: v0.3.5
+    tag: v0.4.3-rc.1
     pullPolicy: IfNotPresent
 
   # Additional arguments to be passed into the Prometheus Federator image
diff --git a/packages/prometheus-federator/generated-changes/dependencies/helmProjectOperator/dependency.yaml b/packages/prometheus-federator/generated-changes/dependencies/helmProjectOperator/dependency.yaml
index e8b94722..41a0fa0c 100644
--- a/packages/prometheus-federator/generated-changes/dependencies/helmProjectOperator/dependency.yaml
+++ b/packages/prometheus-federator/generated-changes/dependencies/helmProjectOperator/dependency.yaml
@@ -1,3 +1,2 @@
-url: https://github.com/rancher/helm-project-operator.git
-subdirectory: charts/helm-project-operator
-commit: 54630179aeae78d5bb28688ee2262a0ec362ec65
+url: https://github.com/rancher/helm-project-operator/releases/download/v0.3.1/helm-project-operator-0.3.1.tgz
+doNotRelease: true
\ No newline at end of file
diff --git a/scripts/build b/scripts/build
index 73d7d55a..f3f04ce6 100755
--- a/scripts/build
+++ b/scripts/build
@@ -7,14 +7,41 @@ cd $(dirname $0)/..
 
 ./scripts/build-chart
 
-mkdir -p bin
+echo "Starting \`prometheus-federator\` binary build:";
+
+mkdir -p build/bin
 if [ "$(uname)" = "Linux" ]; then
     OTHER_LINKFLAGS="-extldflags -static -s"
 fi
+
 LINKFLAGS="-X github.com/rancher/prometheus-federator/pkg/version.Version=$VERSION"
 LINKFLAGS="-X github.com/rancher/prometheus-federator/pkg/version.GitCommit=$COMMIT $LINKFLAGS"
-CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/prometheus-federator
-if [ "$CROSS" = "true" ] && [ "$ARCH" = "amd64" ]; then
-    GOOS=darwin go build -ldflags "$LINKFLAGS" -o bin/prometheus-federator-darwin
-    GOOS=windows go build -ldflags "$LINKFLAGS" -o bin/prometheus-federator-windows
+
+ARCHES=( "$ARCH" )
+# Set CROSS_ARCH to build for the other architecture
+if [ "$CROSS_ARCH" == "true" ]; then
+    case "$ARCH" in
+        amd64) XARCH=arm64 ;;
+        arm64) XARCH=amd64 ;;
+        *) echo "Unsupported ARCH of $ARCH" 1>&2 ; exit 1
+    esac
+    ARCHES+=( "$XARCH" )
 fi
+echo "Building for Arch(s): ${ARCHES[*]}"
+
+for A in "${ARCHES[@]}" ; do
+    GOARCH="$A" CGO_ENABLED=0 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o "build/bin/prometheus-federator-$A"
+    # Set CROSS to build for other OS'es
+    if [ "$CROSS" = "true" ]; then
+        for OS in darwin windows ; do
+            GOARCH="$A" GOOS=$OS go build -ldflags "$LINKFLAGS" -o "build/bin/prometheus-federator-$OS-$A"
+            echo "Built \`prometheus-federator-$OS-$A\`"
+        done
+    fi
+done
+
+cd build/bin
+ln -sf "./prometheus-federator-$ARCH" "./prometheus-federator"
+cd ../..
+
+echo "Completed \`prometheus-federator\` binary build."
\ No newline at end of file
diff --git a/scripts/build-chart b/scripts/build-chart
index 0ea7c4b0..4196c4ef 100755
--- a/scripts/build-chart
+++ b/scripts/build-chart
@@ -5,9 +5,13 @@ source $(dirname $0)/version
 
 cd $(dirname $0)/..
 
-CHART=rancher-project-monitoring
-VERSION=$(find ./charts/${CHART} -type d -maxdepth 1 -mindepth 1 | tr - \~ | sort -rV | tr \~ - | head -n1 | cut -d'/' -f4)
+CHART=${CHART:-rancher-project-monitoring}
+VERSION=${EMBEDED_CHART_VERSION:-$(find ./charts/${CHART} -maxdepth 1 -mindepth 1 -type d | tr - \~ | sort -rV | tr \~ - | head -n1 | cut -d'/' -f4)}
 
-helm package charts/${CHART}/${VERSION} --destination bin/${CHART}
-base64 -i bin/${CHART}/${CHART}-${VERSION}.tgz > bin/${CHART}/${CHART}.tgz.base64
-rm bin/${CHART}/${CHART}-${VERSION}.tgz
\ No newline at end of file
+mkdir -p build/bin
+
+helm package charts/${CHART}/${VERSION} --destination build/chart
+base64 -i build/chart/${CHART}-${VERSION}.tgz > build/chart/${CHART}.tgz.base64
+rm build/chart/${CHART}-${VERSION}.tgz
+
+echo "Completed ${CHART} (${VERSION}) build process."
\ No newline at end of file
diff --git a/scripts/package b/scripts/package
index e86450e5..9f7a9160 100755
--- a/scripts/package
+++ b/scripts/package
@@ -5,8 +5,10 @@ source $(dirname $0)/version
 
 cd $(dirname $0)/..
 
+echo "Starting \`prometheus-federator\` packaging:";
+
 mkdir -p dist/artifacts
-cp bin/prometheus-federator dist/artifacts/prometheus-federator${SUFFIX}
+cp build/bin/prometheus-federator dist/artifacts/prometheus-federator${SUFFIX}
 
 IMAGE=${REPO}/prometheus-federator:${TAG}
 DOCKERFILE=package/Dockerfile
@@ -14,5 +16,6 @@ if [ -e ${DOCKERFILE}.${ARCH} ]; then
     DOCKERFILE=${DOCKERFILE}.${ARCH}
 fi
 
+echo "Building \`${DOCKERFILE}\` with name \`${IMAGE}\`:";
 docker build -f ${DOCKERFILE} -t ${IMAGE} .
-echo Built ${IMAGE}
+echo "Completed building ${IMAGE} container image"
diff --git a/scripts/pull-scripts b/scripts/pull-scripts
index 5cb48d29..9a8464d5 100755
--- a/scripts/pull-scripts
+++ b/scripts/pull-scripts
@@ -26,10 +26,10 @@ if [[ "$OS" == "windows" ]]; then
 else
     BINARY_NAME="charts-build-scripts_${OS}_${ARCH}"
 fi
-curl -s -L ${CHARTS_BUILD_SCRIPTS_REPO%.git}/releases/download/${CHARTS_BUILD_SCRIPT_VERSION}/${BINARY_NAME} --output bin/charts-build-scripts
+response_code=$(curl -s -o bin/charts-build-scripts -w "%{http_code}" -L "${CHARTS_BUILD_SCRIPTS_REPO%.git}/releases/download/${CHARTS_BUILD_SCRIPT_VERSION}/${BINARY_NAME}")
 
 # Fall back to binary name format from old release scheme
-if ! [[ -f bin/charts-build-scripts ]] || [[ $(cat bin/charts-build-scripts) == "Not Found" ]]; then
+if ! [[ -f bin/charts-build-scripts ]] || [[ "$response_code" == "404" ]]; then
     echo "Falling back to old binary name format..."
     rm bin/charts-build-scripts;
     if [[ ${OS} == "linux" ]]; then
@@ -37,11 +37,11 @@ if ! [[ -f bin/charts-build-scripts ]] || [[ $(cat bin/charts-build-scripts) ==
     else
         BINARY_NAME=charts-build-scripts-${OS}
     fi
-    curl -s -L ${CHARTS_BUILD_SCRIPTS_REPO%.git}/releases/download/${CHARTS_BUILD_SCRIPT_VERSION}/${BINARY_NAME} --output bin/charts-build-scripts
+    response_code=$(curl -s -o bin/charts-build-scripts -w "%{http_code}" -L "${CHARTS_BUILD_SCRIPTS_REPO%.git}/releases/download/${CHARTS_BUILD_SCRIPT_VERSION}/${BINARY_NAME}")
 fi
 
 # If falling back to old binary name format did not work, fail
-if ! [[ -f bin/charts-build-scripts ]] || [[ $(cat bin/charts-build-scripts) == "Not Found" ]]; then
+if ! [[ -f bin/charts-build-scripts ]] || [[ "$response_code" == "404" ]]; then
     echo "Failed to find charts-build-scripts binary"
     rm bin/charts-build-scripts;
     exit 1
diff --git a/scripts/validate b/scripts/validate
index 35a87f00..feb8b87d 100755
--- a/scripts/validate
+++ b/scripts/validate
@@ -3,7 +3,8 @@ set -e
 
 cd $(dirname $0)/..
 
-echo Running validation
+echo "Running validation"
 PACKAGES="$(go list ./...)"
 echo Running: go fmt
 test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)"
+echo "Validate passed"
\ No newline at end of file
diff --git a/scripts/validate-chart b/scripts/validate-chart
index 37473add..11677faf 100755
--- a/scripts/validate-chart
+++ b/scripts/validate-chart
@@ -3,7 +3,7 @@ set -e
 
 cd $(dirname $0)/..
 
-
+echo "Validating newest prometheus-federator chart"
 CHART=prometheus-federator
 VERSION=$(find ./charts/${CHART} -type d -maxdepth 1 -mindepth 1 | tr - \~ | sort -rV | tr \~ - | head -n1 | cut -d'/' -f4)
 
diff --git a/scripts/validate-charts b/scripts/validate-charts
index f481bdeb..44b2cfe3 100755
--- a/scripts/validate-charts
+++ b/scripts/validate-charts
@@ -3,7 +3,7 @@ set -e
 
 cd $(dirname $0)/..
 
-echo Running chart validation
+echo Running general chart validation
 
 ./scripts/pull-scripts
 ./bin/charts-build-scripts validate --local
\ No newline at end of file
diff --git a/scripts/validate-ci b/scripts/validate-ci
index e7981cf8..38121d5e 100755
--- a/scripts/validate-ci
+++ b/scripts/validate-ci
@@ -3,6 +3,7 @@ set -e
 
 cd $(dirname $0)/..
 
+echo "Verifying code is generated and repo is clean"
 go generate
 
 source ./scripts/version
diff --git a/scripts/validate-packages b/scripts/validate-packages
index 7e580657..509720e2 100755
--- a/scripts/validate-packages
+++ b/scripts/validate-packages
@@ -1,6 +1,7 @@
 #!/bin/bash
 set -e
 
+echo "Validating packages..."
 cd $(dirname $0)/..
 
 monitoring_pkg_path=packages/rancher-project-monitoring/package.yaml
diff --git a/scripts/version b/scripts/version
index 5f357a62..2215508b 100755
--- a/scripts/version
+++ b/scripts/version
@@ -1,4 +1,5 @@
 #!/bin/bash
+set -x
 
 CHARTS_BUILD_SCRIPTS_REPO=https://github.com/rancher/charts-build-scripts.git
 CHARTS_BUILD_SCRIPT_VERSION=v0.9.2
@@ -8,7 +9,7 @@ if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
 fi
 
 COMMIT=$(git rev-parse --short HEAD)
-GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)}
+GIT_TAG=$(git tag -l --contains HEAD | head -n 1)
 
 if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then
     VERSION=$GIT_TAG
@@ -16,10 +17,25 @@ else
     VERSION="${COMMIT}${DIRTY}"
 fi
 
+ARCH=$TARGET_ARCH
+if [ -z "$ARCH" ]; then
+    ARCH=$(go env GOHOSTARCH)
+fi
+
+SUFFIX="-${ARCH}"
+
 TAG=${TAG:-${VERSION}}
 REPO=${REPO:-rancher}
 
-if echo $TAG | grep -q dirty; then
-    TAG=dev
+if echo "$TAG" | grep -q dirty; then
+    TAG="v0.0.0-dev.1-${COMMIT}"
 fi
-IMAGE=${IMAGE:-$REPO/prometheus-federator:${TAG}}
\ No newline at end of file
+IMAGE=${IMAGE:-"$REPO/prometheus-federator:${TAG}"}
+
+function print_version_debug() {
+    echo "BUILD_TARGET: $BUILD_TARGET";
+    echo "SUFFIX: $SUFFIX";
+    echo "REPO: $REPO; TAG: $TAG";
+    echo "IMAGE: $IMAGE";
+}
+if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then print_version_debug "$1"; fi
\ No newline at end of file
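
Usage sketch for the reworked scripts above (illustrative values only; assumes a local checkout run from the repository root). CROSS_ARCH, EMBEDED_CHART_VERSION, REPO and TAG are the variables these scripts now read; the chart version and tag shown here mirror the ones used elsewhere in this change.

    # Build binaries for both amd64 and arm64, embedding the pinned chart version
    EMBEDED_CHART_VERSION=0.3.4 REPO=rancher TAG=v0.4.3-rc.1 CROSS_ARCH=true ./scripts/build
    # Package the container image from build/bin via package/Dockerfile
    REPO=rancher TAG=v0.4.3-rc.1 ./scripts/package
    # Executed directly (not sourced), scripts/version prints the computed SUFFIX, REPO, TAG and IMAGE values
    ./scripts/version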