diff --git a/.codecov.yml b/.codecov.yml index 9b9f57ef04..2758a60933 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -8,13 +8,16 @@ coverage: default: threshold: 0.1 ignore: - - "pkg/apis/rollouts/v1alpha1" - - "test" - - "**/*.pb.go" - - "**/*.pb.gw.go" - - "**/*generated.go" - - "**/*generated.deepcopy.go" - - "**/*_test.go" - - "pkg/apis/client/.*" - - "pkg/client/.*" - - "vendor/.*" + - 'pkg/apis/rollouts/v1alpha1' + - 'test' + - '**/*.pb.go' + - '**/*.pb.gw.go' + - '**/*generated.go' + - '**/*generated.deepcopy.go' + - '**/*_test.go' + - 'pkg/apis/client/.*' + - 'pkg/client/.*' + - 'vendor/.*' + - '**/mocks/*' + - 'hack/gen-crd-spec/main.go' + - 'hack/gen-docs/main.go' diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 6120a179b9..326772bb06 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -12,7 +12,7 @@ jobs: pull-requests: write # for peter-evans/create-pull-request to create a PR runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Update Changelog @@ -23,7 +23,7 @@ jobs: ./git-chglog --sort semver -o CHANGELOG.md v1.3.1.. rm git-chglog - name: Create Pull Request - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@v6 with: commit-message: update changelog title: "docs: Update Changelog" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d8309ea267..4116801130 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -27,11 +27,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3.1.0 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java @@ -39,7 +39,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -53,4 +53,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index 7322aab050..a155ef9d9b 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -39,7 +39,7 @@ jobs: - name: Docker meta (controller) id: controller-meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: | quay.io/codefresh/argo-rollouts @@ -52,7 +52,7 @@ jobs: - name: Docker meta (plugin) id: plugin-meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: | quay.io/codefresh/kubectl-argo-rollouts diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 46634b9925..f410e66373 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Event File path: ${{ github.event_path }} @@ -38,18 +38,20 @@ jobs: fail-fast: false matrix: kubernetes-minor-version: - - 1.23 - 1.24 - 1.25 - 1.26 + - 1.27 + - 1.28 + - 1.29 name: Run end-to-end tests runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v4.1.0 + uses: actions/setup-go@v5.0.0 
with: - go-version: '1.20' - - uses: actions/checkout@v3.1.0 + go-version: '1.21' + - uses: actions/checkout@v4 - name: Setup k3s env: INSTALL_K3S_CHANNEL: v${{ matrix.kubernetes-minor-version }} @@ -60,7 +62,7 @@ jobs: sudo chmod 755 ~/.kube/config kubectl version kubectl create ns argo-rollouts - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} @@ -85,14 +87,14 @@ jobs: [[ -f rerunreport.txt ]] && cat rerunreport.txt || echo "No rerun report found" - name: Upload E2E Test Results if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: E2E Test Results (k8s ${{ matrix.kubernetes-minor-version }}) path: | junit.xml - name: Upload e2e-controller logs - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: e2e-controller-k8s-${{ matrix.kubernetes-minor-version }}.log path: /tmp/e2e-controller.log - if: ${{ failure() }} + if: ${{ always() }} diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml index 59fece9c6e..ffd8b736d0 100644 --- a/.github/workflows/gh-pages.yaml +++ b/.github/workflows/gh-pages.yaml @@ -18,15 +18,15 @@ jobs: contents: write # for peaceiris/actions-gh-pages to push pages branch runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3.1.0 + - uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.x - name: Set up Go - uses: actions/setup-go@v4.1.0 + uses: actions/setup-go@v5.0.0 with: - go-version: '1.20' + go-version: '1.21' - name: build run: | pip install mkdocs mkdocs_material diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 1733811964..8cd5e4d106 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -11,7 +11,7 @@ on: - "master" env: # Golang version to use across CI steps - GOLANG_VERSION: '1.20' + GOLANG_VERSION: '1.21' concurrency: group: ${{ 
github.workflow }}-${{ github.ref }} @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Upload - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Event File path: ${{ github.event_path }} @@ -35,31 +35,31 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v4.1.0 + uses: actions/setup-go@v5.0.0 with: go-version: ${{ env.GOLANG_VERSION }} - name: Checkout code - uses: actions/checkout@v3.1.0 + uses: actions/checkout@v4 - name: Run golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: - version: v1.53.3 + version: v1.57.2 args: --timeout 6m build: name: Build runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v4.1.0 + uses: actions/setup-go@v5.0.0 with: go-version: ${{ env.GOLANG_VERSION }} id: go - name: Check out code into the Go module directory - uses: actions/checkout@v3.1.0 + uses: actions/checkout@v4 - name: Restore go build cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.cache/go-build key: ${{ runner.os }}-go-build-v1-${{ github.run_id }} @@ -76,20 +76,20 @@ jobs: - name: Upload Unit Test Results if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Unit Test Results path: | junit.xml - name: Generate code coverage artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: code-coverage path: coverage.out - name: Upload code coverage information to codecov.io - uses: codecov/codecov-action@v3.1.4 + uses: codecov/codecov-action@v4.1.1 with: file: coverage.out @@ -100,9 +100,9 @@ jobs: GOPATH: /home/runner/go steps: - name: Checkout code - uses: actions/checkout@v3.1.0 + uses: actions/checkout@v4 - name: Setup Golang - uses: actions/setup-go@v4.1.0 + uses: actions/setup-go@v5.0.0 with: go-version: ${{ env.GOLANG_VERSION }} # k8s codegen generates files into GOPATH location instead of the GitHub git checkout location @@ 
-111,15 +111,15 @@ jobs: run: | mkdir -p ~/go/src/github.com/argoproj ln -s $(pwd) ~/go/src/github.com/argoproj/argo-rollouts - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: /home/runner/.cache/go-build key: GOCACHE-${{ hashFiles('**/go.mod') }} - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: /home/runner/go/pkg/mod key: GOMODCACHE-${{ hashFiles('**/go.mod') }} - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: /home/runner/go/bin key: go-bin-v1-${{ hashFiles('**/go.mod') }} diff --git a/.github/workflows/image-reuse.yaml b/.github/workflows/image-reuse.yaml index 10e2df49d5..970080e845 100644 --- a/.github/workflows/image-reuse.yaml +++ b/.github/workflows/image-reuse.yaml @@ -58,28 +58,28 @@ jobs: image-digest: ${{ steps.image.outputs.digest }} steps: - name: Checkout code - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.3.0 + uses: actions/checkout@v4 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} if: ${{ github.ref_type == 'tag'}} - name: Checkout code - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.3.0 + uses: actions/checkout@v4 if: ${{ github.ref_type != 'tag'}} - name: Setup Golang - uses: actions/setup-go@v4.1.0 # v3.5.0 + uses: actions/setup-go@v5.0.0 with: go-version: ${{ inputs.go-version }} - name: Install cosign - uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 # v3.1.1 + uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0 with: cosign-release: 'v2.2.0' - - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 - - uses: docker/setup-buildx-action@4c0219f9ac95b02789c1075625400b2acbff50b1 # v2.9.1 + - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 + - uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0 - name: Setup tags for container image as a CSV type run: | @@ -106,7 
@@ jobs: echo 'EOF' >> $GITHUB_ENV - name: Login to Quay.io - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 with: registry: quay.io username: ${{ secrets.quay_username }} @@ -114,7 +114,7 @@ jobs: if: ${{ inputs.quay_image_name && inputs.push }} - name: Login to GitHub Container Registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 with: registry: ghcr.io username: ${{ secrets.ghcr_username }} @@ -122,7 +122,7 @@ jobs: if: ${{ inputs.ghcr_image_name && inputs.push }} - name: Login to dockerhub Container Registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 with: username: ${{ secrets.docker_username }} password: ${{ secrets.docker_password }} @@ -130,7 +130,7 @@ jobs: - name: Build and push container image id: image - uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 #v5.3.0 with: context: . 
platforms: ${{ inputs.platforms }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2d050379fd..594773459e 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -10,7 +10,7 @@ on: description: 'Update stable tag' required: true type: boolean - default: 'false' + default: false permissions: contents: read @@ -120,14 +120,14 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3.1.0 + uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.tag }} - name: Setup Golang uses: actions/setup-go@v4 with: - go-version: 1.19 + go-version: 1.21 - name: Generate release artifacts run: | diff --git a/.github/workflows/stale-issues-pr.yml b/.github/workflows/stale-issues-pr.yml index 79508bdefd..041a5206ab 100644 --- a/.github/workflows/stale-issues-pr.yml +++ b/.github/workflows/stale-issues-pr.yml @@ -13,7 +13,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: operations-per-run: 250 stale-issue-message: 'This issue is stale because it has been open 60 days with no activity.' diff --git a/.github/workflows/waiting-issues.yml b/.github/workflows/waiting-issues.yml index 6dd92d0683..56b30b5608 100644 --- a/.github/workflows/waiting-issues.yml +++ b/.github/workflows/waiting-issues.yml @@ -13,7 +13,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: operations-per-run: 250 stale-issue-message: 'This issue is stale because it has awaiting-response label for 5 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 
diff --git a/CHANGELOG.md b/CHANGELOG.md index c9255af062..c92e20fed3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,267 @@ + +## [v1.6.6](https://github.com/argoproj/argo-rollouts/compare/v1.6.5...v1.6.6) (2024-02-12) + +### Fix + +* stuck rollout when 2nd deployment happens before 1st finishes ([#3354](https://github.com/argoproj/argo-rollouts/issues/3354)) +* do not require pod readiness when switching desired service selector on abort ([#3338](https://github.com/argoproj/argo-rollouts/issues/3338)) + + + +## [v1.6.5](https://github.com/argoproj/argo-rollouts/compare/v1.6.4...v1.6.5) (2024-01-25) + +### Chore + +* add logging context around replicaset updates ([#3326](https://github.com/argoproj/argo-rollouts/issues/3326)) +* remove year from codegen license ([#3282](https://github.com/argoproj/argo-rollouts/issues/3282)) + +### Fix + +* log rs name when update fails ([#3318](https://github.com/argoproj/argo-rollouts/issues/3318)) +* keep rs inormer updated upon updating labels and annotations ([#3321](https://github.com/argoproj/argo-rollouts/issues/3321)) +* updates to replicas and pod template at the same time causes rollout to get stuck ([#3272](https://github.com/argoproj/argo-rollouts/issues/3272)) + + + +## [v1.6.4](https://github.com/argoproj/argo-rollouts/compare/v1.6.3...v1.6.4) (2023-12-08) + +### Fix + +* make sure we use the updated rs when we write back to informer ([#3237](https://github.com/argoproj/argo-rollouts/issues/3237)) +* conflict on updates to replicaset revision ([#3216](https://github.com/argoproj/argo-rollouts/issues/3216)) + + + +## [v1.6.3](https://github.com/argoproj/argo-rollouts/compare/v1.6.2...v1.6.3) (2023-12-04) + +### Build + +* **deps:** always resolve momentjs version 2.29.4 ([#3182](https://github.com/argoproj/argo-rollouts/issues/3182)) + +### Fix + +* rollouts getting stuck due to bad rs informer updates ([#3200](https://github.com/argoproj/argo-rollouts/issues/3200)) + + + +## 
[v1.6.2](https://github.com/argoproj/argo-rollouts/compare/v1.6.1...v1.6.2) (2023-11-02) + +### Fix + +* Revert "fix: istio destionationrule subsets enforcement ([#3126](https://github.com/argoproj/argo-rollouts/issues/3126))" ([#3147](https://github.com/argoproj/argo-rollouts/issues/3147)) + + + +## [v1.6.1](https://github.com/argoproj/argo-rollouts/compare/v1.6.0...v1.6.1) (2023-11-01) + +### Chore + +* upgrade cosign ([#3139](https://github.com/argoproj/argo-rollouts/issues/3139)) +* add missing rollout fields ([#3062](https://github.com/argoproj/argo-rollouts/issues/3062)) +* change file name for readthedocs compatibility ([#2999](https://github.com/argoproj/argo-rollouts/issues/2999)) + +### Fix + +* istio destionationrule subsets enforcement ([#3126](https://github.com/argoproj/argo-rollouts/issues/3126)) +* docs require build.os to be defined ([#3133](https://github.com/argoproj/argo-rollouts/issues/3133)) +* inopportune scaling events would lose some status fields ([#3060](https://github.com/argoproj/argo-rollouts/issues/3060)) +* rollback to stable with dynamicStableScale could overwhelm stable pods ([#3077](https://github.com/argoproj/argo-rollouts/issues/3077)) +* prevent hot loop when fully promoted rollout is aborted ([#3064](https://github.com/argoproj/argo-rollouts/issues/3064)) +* keep rs informer updated ([#3091](https://github.com/argoproj/argo-rollouts/issues/3091)) +* bump notification-engine to fix double send on self server notifications ([#3095](https://github.com/argoproj/argo-rollouts/issues/3095)) +* sync notification controller configmaps/secrets first ([#3075](https://github.com/argoproj/argo-rollouts/issues/3075)) +* missing notification on error ([#3076](https://github.com/argoproj/argo-rollouts/issues/3076)) + + + +## [v1.6.0](https://github.com/argoproj/argo-rollouts/compare/v1.6.0-rc1...v1.6.0) (2023-09-05) + +### Chore + +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.20.2 to 1.21.0 
([#2950](https://github.com/argoproj/argo-rollouts/issues/2950)) +* **deps:** bump github.com/antonmedv/expr from 1.12.7 to 1.13.0 ([#2951](https://github.com/argoproj/argo-rollouts/issues/2951)) + +### Docs + +* update supported k8s version ([#2949](https://github.com/argoproj/argo-rollouts/issues/2949)) + +### Fix + +* analysis step should be ignored after promote ([#3016](https://github.com/argoproj/argo-rollouts/issues/3016)) +* **controller:** rollback should skip all steps to active rs within RollbackWindow ([#2953](https://github.com/argoproj/argo-rollouts/issues/2953)) +* **controller:** typo fix ("Secrete" -> "Secret") in secret informer ([#2965](https://github.com/argoproj/argo-rollouts/issues/2965)) + + + +## [v1.6.0-rc1](https://github.com/argoproj/argo-rollouts/compare/v1.5.1...v1.6.0-rc1) (2023-08-10) + +### Chore + +* bump gotestsum and fix flakey test causing nil channel send ([#2934](https://github.com/argoproj/argo-rollouts/issues/2934)) +* quote golang version string to not use go 1.2.2 ([#2915](https://github.com/argoproj/argo-rollouts/issues/2915)) +* bump golang to 1.20 ([#2910](https://github.com/argoproj/argo-rollouts/issues/2910)) +* add make help cmd ([#2854](https://github.com/argoproj/argo-rollouts/issues/2854)) +* add unit test ([#2798](https://github.com/argoproj/argo-rollouts/issues/2798)) +* Update test and related docs for plugin name standard ([#2728](https://github.com/argoproj/argo-rollouts/issues/2728)) +* bump k8s deps to v0.25.8 ([#2712](https://github.com/argoproj/argo-rollouts/issues/2712)) +* add zachaller as lead in owers file ([#2759](https://github.com/argoproj/argo-rollouts/issues/2759)) +* Add tests for pause functionality in rollout package ([#2772](https://github.com/argoproj/argo-rollouts/issues/2772)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.26.0 to 1.26.1 ([#2840](https://github.com/argoproj/argo-rollouts/issues/2840)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 
1.18.30 to 1.18.31 ([#2924](https://github.com/argoproj/argo-rollouts/issues/2924)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.27.0 to 1.27.1 ([#2927](https://github.com/argoproj/argo-rollouts/issues/2927)) +* **deps:** bump docker/build-push-action from 4.0.0 to 4.1.0 ([#2832](https://github.com/argoproj/argo-rollouts/issues/2832)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.26.3 to 1.27.0 ([#2922](https://github.com/argoproj/argo-rollouts/issues/2922)) +* **deps:** bump github.com/sirupsen/logrus from 1.9.2 to 1.9.3 ([#2821](https://github.com/argoproj/argo-rollouts/issues/2821)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.29 to 1.18.30 ([#2919](https://github.com/argoproj/argo-rollouts/issues/2919)) +* **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.19.0 to 1.19.1 ([#2920](https://github.com/argoproj/argo-rollouts/issues/2920)) +* **deps:** bump google.golang.org/grpc from 1.56.2 to 1.57.0 ([#2908](https://github.com/argoproj/argo-rollouts/issues/2908)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.28 to 1.18.29 ([#2907](https://github.com/argoproj/argo-rollouts/issues/2907)) +* **deps:** bump github.com/antonmedv/expr from 1.12.6 to 1.12.7 ([#2894](https://github.com/argoproj/argo-rollouts/issues/2894)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.26.2 to 1.26.3 ([#2884](https://github.com/argoproj/argo-rollouts/issues/2884)) +* **deps:** bump docker/setup-qemu-action from 2.1.0 to 2.2.0 ([#2878](https://github.com/argoproj/argo-rollouts/issues/2878)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.27 to 1.18.28 ([#2883](https://github.com/argoproj/argo-rollouts/issues/2883)) +* **deps:** bump slsa-framework/slsa-github-generator from 1.6.0 to 1.7.0 ([#2880](https://github.com/argoproj/argo-rollouts/issues/2880)) +* **deps:** bump actions/setup-go from 4.0.0 to 4.0.1 
([#2881](https://github.com/argoproj/argo-rollouts/issues/2881)) +* **deps:** bump docker/setup-buildx-action from 2.5.0 to 2.9.1 ([#2879](https://github.com/argoproj/argo-rollouts/issues/2879)) +* **deps:** bump docker/login-action from 2.1.0 to 2.2.0 ([#2877](https://github.com/argoproj/argo-rollouts/issues/2877)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.13 to 1.19.14 ([#2886](https://github.com/argoproj/argo-rollouts/issues/2886)) +* **deps:** bump github.com/antonmedv/expr from 1.12.5 to 1.12.6 ([#2882](https://github.com/argoproj/argo-rollouts/issues/2882)) +* **deps:** bump google.golang.org/grpc from 1.56.1 to 1.56.2 ([#2872](https://github.com/argoproj/argo-rollouts/issues/2872)) +* **deps:** bump sigstore/cosign-installer from 3.1.0 to 3.1.1 ([#2860](https://github.com/argoproj/argo-rollouts/issues/2860)) +* **deps:** bump google.golang.org/protobuf from 1.30.0 to 1.31.0 ([#2859](https://github.com/argoproj/argo-rollouts/issues/2859)) +* **deps:** bump sigstore/cosign-installer from 3.0.5 to 3.1.0 ([#2858](https://github.com/argoproj/argo-rollouts/issues/2858)) +* **deps:** bump google.golang.org/grpc from 1.55.0 to 1.56.1 ([#2856](https://github.com/argoproj/argo-rollouts/issues/2856)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.12 to 1.19.13 ([#2847](https://github.com/argoproj/argo-rollouts/issues/2847)) +* **deps:** bump actions/setup-go from 3.5.0 to 4.0.1 ([#2849](https://github.com/argoproj/argo-rollouts/issues/2849)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.26 to 1.18.27 ([#2844](https://github.com/argoproj/argo-rollouts/issues/2844)) +* **deps:** bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 ([#2846](https://github.com/argoproj/argo-rollouts/issues/2846)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.26.1 to 1.26.2 ([#2848](https://github.com/argoproj/argo-rollouts/issues/2848)) +* **deps:** 
bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.11 to 1.19.12 ([#2839](https://github.com/argoproj/argo-rollouts/issues/2839)) +* **deps:** bump slsa-framework/slsa-github-generator from 1.7.0 to 1.8.0 ([#2936](https://github.com/argoproj/argo-rollouts/issues/2936)) +* **deps:** bump docker/build-push-action from 4.1.0 to 4.1.1 ([#2837](https://github.com/argoproj/argo-rollouts/issues/2837)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.25 to 1.18.26 ([#2841](https://github.com/argoproj/argo-rollouts/issues/2841)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.31 to 1.18.32 ([#2928](https://github.com/argoproj/argo-rollouts/issues/2928)) +* **deps:** bump github.com/hashicorp/go-plugin from 1.4.9 to 1.4.10 ([#2822](https://github.com/argoproj/argo-rollouts/issues/2822)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.14 to 1.20.1 ([#2926](https://github.com/argoproj/argo-rollouts/issues/2926)) +* **deps:** bump github.com/stretchr/testify from 1.8.3 to 1.8.4 ([#2817](https://github.com/argoproj/argo-rollouts/issues/2817)) +* **deps:** bump github.com/sirupsen/logrus from 1.9.1 to 1.9.2 ([#2789](https://github.com/argoproj/argo-rollouts/issues/2789)) +* **deps:** bump github.com/stretchr/testify from 1.8.2 to 1.8.3 ([#2796](https://github.com/argoproj/argo-rollouts/issues/2796)) +* **deps:** bump sigstore/cosign-installer from 3.0.3 to 3.0.5 ([#2788](https://github.com/argoproj/argo-rollouts/issues/2788)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.20.1 to 1.20.2 ([#2941](https://github.com/argoproj/argo-rollouts/issues/2941)) +* **deps:** bump github.com/sirupsen/logrus from 1.9.0 to 1.9.1 ([#2784](https://github.com/argoproj/argo-rollouts/issues/2784)) +* **deps:** bump codecov/codecov-action from 3.1.3 to 3.1.4 ([#2782](https://github.com/argoproj/argo-rollouts/issues/2782)) +* **deps:** bump 
github.com/aws/aws-sdk-go-v2/config from 1.18.24 to 1.18.25 ([#2770](https://github.com/argoproj/argo-rollouts/issues/2770)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.23 to 1.18.24 ([#2768](https://github.com/argoproj/argo-rollouts/issues/2768)) +* **deps:** bump google.golang.org/grpc from 1.54.0 to 1.55.0 ([#2763](https://github.com/argoproj/argo-rollouts/issues/2763)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.22 to 1.18.23 ([#2756](https://github.com/argoproj/argo-rollouts/issues/2756)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.27.1 to 1.27.2 ([#2944](https://github.com/argoproj/argo-rollouts/issues/2944)) +* **deps:** replace `github.com/ghodss/yaml` with `sigs.k8s.io/yaml` ([#2681](https://github.com/argoproj/argo-rollouts/issues/2681)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.10 to 1.26.0 ([#2755](https://github.com/argoproj/argo-rollouts/issues/2755)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.10 to 1.19.11 ([#2757](https://github.com/argoproj/argo-rollouts/issues/2757)) +* **deps:** bump github.com/prometheus/client_golang from 1.15.0 to 1.15.1 ([#2754](https://github.com/argoproj/argo-rollouts/issues/2754)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.21 to 1.18.22 ([#2746](https://github.com/argoproj/argo-rollouts/issues/2746)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.9 to 1.25.10 ([#2745](https://github.com/argoproj/argo-rollouts/issues/2745)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.32 to 1.18.33 ([#2943](https://github.com/argoproj/argo-rollouts/issues/2943)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.9 to 1.19.10 ([#2747](https://github.com/argoproj/argo-rollouts/issues/2747)) +* **deps:** bump codecov/codecov-action from 3.1.2 to 3.1.3 
([#2735](https://github.com/argoproj/argo-rollouts/issues/2735)) +* **deps:** bump actions/setup-go from 4.0.1 to 4.1.0 ([#2947](https://github.com/argoproj/argo-rollouts/issues/2947)) +* **deps:** bump github.com/prometheus/client_golang from 1.14.0 to 1.15.0 ([#2721](https://github.com/argoproj/argo-rollouts/issues/2721)) +* **deps:** bump codecov/codecov-action from 3.1.1 to 3.1.2 ([#2711](https://github.com/argoproj/argo-rollouts/issues/2711)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.20 to 1.18.21 ([#2709](https://github.com/argoproj/argo-rollouts/issues/2709)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.8 to 1.19.9 ([#2708](https://github.com/argoproj/argo-rollouts/issues/2708)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.8 to 1.25.9 ([#2710](https://github.com/argoproj/argo-rollouts/issues/2710)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.19 to 1.18.20 ([#2705](https://github.com/argoproj/argo-rollouts/issues/2705)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.7 to 1.19.8 ([#2704](https://github.com/argoproj/argo-rollouts/issues/2704)) +* **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.17.7 to 1.17.8 ([#2703](https://github.com/argoproj/argo-rollouts/issues/2703)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.7 to 1.25.8 ([#2702](https://github.com/argoproj/argo-rollouts/issues/2702)) +* **deps:** bump peter-evans/create-pull-request from 4 to 5 ([#2697](https://github.com/argoproj/argo-rollouts/issues/2697)) +* **deps:** bump github.com/spf13/cobra from 1.6.1 to 1.7.0 ([#2698](https://github.com/argoproj/argo-rollouts/issues/2698)) +* **deps:** bump github.com/influxdata/influxdb-client-go/v2 from 2.12.2 to 2.12.3 ([#2684](https://github.com/argoproj/argo-rollouts/issues/2684)) + +### Ci + +* generate attestations during a release 
([#2785](https://github.com/argoproj/argo-rollouts/issues/2785)) +* use keyless signing for main and release branches ([#2783](https://github.com/argoproj/argo-rollouts/issues/2783)) + +### Docs + +* support for Google Cloud Load balancers ([#2803](https://github.com/argoproj/argo-rollouts/issues/2803)) +* Update Changelog ([#2683](https://github.com/argoproj/argo-rollouts/issues/2683)) +* mirroring support in Traefik is not implemented yet ([#2904](https://github.com/argoproj/argo-rollouts/issues/2904)) +* Update docs of Rollout spec to add active/previewMetadata ([#2833](https://github.com/argoproj/argo-rollouts/issues/2833)) +* Update datadog.md - clarify formulas [#2813](https://github.com/argoproj/argo-rollouts/issues/2813) ([#2819](https://github.com/argoproj/argo-rollouts/issues/2819)) +* support for Kong ingress ([#2820](https://github.com/argoproj/argo-rollouts/issues/2820)) +* Fix AWS App Mesh getting started documentation to avoid connection pooling problems ([#2814](https://github.com/argoproj/argo-rollouts/issues/2814)) +* Update Changelog ([#2807](https://github.com/argoproj/argo-rollouts/issues/2807)) +* use correct capitalization for "Datadog" in navigation sidebar ([#2809](https://github.com/argoproj/argo-rollouts/issues/2809)) +* Add gateway API link, fix Contour plugin naming ([#2787](https://github.com/argoproj/argo-rollouts/issues/2787)) +* fix minor mistakes in Migrating to Deployments ([#2270](https://github.com/argoproj/argo-rollouts/issues/2270)) +* Show how plugins are loaded ([#2801](https://github.com/argoproj/argo-rollouts/issues/2801)) +* Fix typo in header routing specification docs ([#2808](https://github.com/argoproj/argo-rollouts/issues/2808)) +* Add some details around running locally to make things clearer new contributors ([#2786](https://github.com/argoproj/argo-rollouts/issues/2786)) +* Add docs for Amazon Managed Prometheus ([#2777](https://github.com/argoproj/argo-rollouts/issues/2777)) +* Update Changelog 
([#2765](https://github.com/argoproj/argo-rollouts/issues/2765)) +* copy argo cd docs drop down fix ([#2731](https://github.com/argoproj/argo-rollouts/issues/2731)) +* Add contour trafficrouter plugin ([#2729](https://github.com/argoproj/argo-rollouts/issues/2729)) +* fix link to plugins for traffic routers ([#2719](https://github.com/argoproj/argo-rollouts/issues/2719)) +* update contributions.md to include k3d as recommended cluster, add details on e2e test setup, and update kubectl install link. Fixes [#1750](https://github.com/argoproj/argo-rollouts/issues/1750) ([#1867](https://github.com/argoproj/argo-rollouts/issues/1867)) +* **analysis:** fix use stringData in the examples ([#2715](https://github.com/argoproj/argo-rollouts/issues/2715)) +* **example:** interval requires count ([#2690](https://github.com/argoproj/argo-rollouts/issues/2690)) +* **example:** Add example on how to execute subset of e2e tests ([#2867](https://github.com/argoproj/argo-rollouts/issues/2867)) + +### Feat + +* enable self service notification support ([#2930](https://github.com/argoproj/argo-rollouts/issues/2930)) +* support prometheus headers ([#2937](https://github.com/argoproj/argo-rollouts/issues/2937)) +* Add insecure option for Prometheus. Fixes [#2913](https://github.com/argoproj/argo-rollouts/issues/2913) ([#2914](https://github.com/argoproj/argo-rollouts/issues/2914)) +* Add prometheus timeout ([#2893](https://github.com/argoproj/argo-rollouts/issues/2893)) +* Support Multiple ALB Ingresses ([#2639](https://github.com/argoproj/argo-rollouts/issues/2639)) +* Send informer add k8s event ([#2834](https://github.com/argoproj/argo-rollouts/issues/2834)) +* add merge key to analysis template ([#2842](https://github.com/argoproj/argo-rollouts/issues/2842)) +* retain TLS configuration for canary ingresses in the nginx integration. 
Fixes [#1134](https://github.com/argoproj/argo-rollouts/issues/1134) ([#2679](https://github.com/argoproj/argo-rollouts/issues/2679)) +* **analysis:** Adds rollout Spec.Selector.MatchLabels to AnalysisRun. Fixes [#2888](https://github.com/argoproj/argo-rollouts/issues/2888) ([#2903](https://github.com/argoproj/argo-rollouts/issues/2903)) +* **controller:** Add custom metadata support for AnalysisRun. Fixes [#2740](https://github.com/argoproj/argo-rollouts/issues/2740) ([#2743](https://github.com/argoproj/argo-rollouts/issues/2743)) +* **dashboard:** Refresh Rollouts dashboard UI ([#2723](https://github.com/argoproj/argo-rollouts/issues/2723)) +* **metricprovider:** allow user to define metrics.provider.job.metadata ([#2762](https://github.com/argoproj/argo-rollouts/issues/2762)) + +### Fix + +* istio dropping fields during removing of managed routes ([#2692](https://github.com/argoproj/argo-rollouts/issues/2692)) +* resolve args to metric in garbage collection function ([#2843](https://github.com/argoproj/argo-rollouts/issues/2843)) +* rollout not modify the VirtualService whit setHeaderRoute step with workloadRef ([#2797](https://github.com/argoproj/argo-rollouts/issues/2797)) +* get new httpRoutesI after removeRoute() to avoid duplicates. 
Fixes [#2769](https://github.com/argoproj/argo-rollouts/issues/2769) ([#2887](https://github.com/argoproj/argo-rollouts/issues/2887)) +* make new alb fullName field optional for backward compatability ([#2806](https://github.com/argoproj/argo-rollouts/issues/2806)) +* change logic of analysis run to better handle errors ([#2695](https://github.com/argoproj/argo-rollouts/issues/2695)) +* cloudwatch metrics provider multiple dimensions ([#2932](https://github.com/argoproj/argo-rollouts/issues/2932)) +* add required ingress permission ([#2933](https://github.com/argoproj/argo-rollouts/issues/2933)) +* properly wrap Datadog API v2 request body ([#2771](https://github.com/argoproj/argo-rollouts/issues/2771)) ([#2775](https://github.com/argoproj/argo-rollouts/issues/2775)) +* **analysis:** Graphite query - remove whitespaces ([#2752](https://github.com/argoproj/argo-rollouts/issues/2752)) +* **analysis:** Graphite metric provider - index out of range [0] with length 0 ([#2751](https://github.com/argoproj/argo-rollouts/issues/2751)) +* **analysis:** Adding field in YAML to provide region for Sigv4 signing. ([#2794](https://github.com/argoproj/argo-rollouts/issues/2794)) +* **controller:** Fix for rollouts getting stuck in loop ([#2689](https://github.com/argoproj/argo-rollouts/issues/2689)) +* **controller:** Remove name label from some k8s client metrics on events and replicasets ([#2851](https://github.com/argoproj/argo-rollouts/issues/2851)) +* **controller:** Add klog logrus bridge. Fixes [#2707](https://github.com/argoproj/argo-rollouts/issues/2707). 
([#2701](https://github.com/argoproj/argo-rollouts/issues/2701)) +* **trafficrouting:** apply stable selectors on canary service on rollout abort [#2781](https://github.com/argoproj/argo-rollouts/issues/2781) ([#2818](https://github.com/argoproj/argo-rollouts/issues/2818)) + +### Refactor + +* change plugin naming pattern [#2720](https://github.com/argoproj/argo-rollouts/issues/2720) ([#2722](https://github.com/argoproj/argo-rollouts/issues/2722)) + +### BREAKING CHANGE + + +The metric labels have changed on controller_clientset_k8s_request_total to not include the name of the resource for events and replicasets. These names have generated hashes in them and cause really high cardinality. + +Remove name label from k8s some client metrics + +The `name` label in the `controller_clientset_k8s_request_total` metric +produce an excessive amount of cardinality for `events` and `replicasets`. +This can lead to hundreds of thousands of unique metrics over a couple +weeks in a large deployment. Set the name to "N/A" for these client request +types. 
+ + ## [v1.5.1](https://github.com/argoproj/argo-rollouts/compare/v1.5.0...v1.5.1) (2023-05-24) @@ -52,8 +315,8 @@ * switch to distroless for cli/dashboard image ([#2596](https://github.com/argoproj/argo-rollouts/issues/2596)) * add Tuhu to users ([#2630](https://github.com/argoproj/argo-rollouts/issues/2630)) * bump deps for prisma ([#2643](https://github.com/argoproj/argo-rollouts/issues/2643)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.6 to 1.25.7 ([#2682](https://github.com/argoproj/argo-rollouts/issues/2682)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.15 to 1.18.16 ([#2652](https://github.com/argoproj/argo-rollouts/issues/2652)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.13 to 1.18.14 ([#2614](https://github.com/argoproj/argo-rollouts/issues/2614)) +* **deps:** bump github.com/antonmedv/expr from 1.12.3 to 1.12.5 ([#2670](https://github.com/argoproj/argo-rollouts/issues/2670)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.16 to 1.18.17 ([#2659](https://github.com/argoproj/argo-rollouts/issues/2659)) * **deps:** bump github.com/antonmedv/expr from 1.12.2 to 1.12.3 ([#2653](https://github.com/argoproj/argo-rollouts/issues/2653)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.5 to 1.19.6 ([#2654](https://github.com/argoproj/argo-rollouts/issues/2654)) @@ -68,20 +331,20 @@ * **deps:** bump google.golang.org/protobuf from 1.29.0 to 1.29.1 ([#2660](https://github.com/argoproj/argo-rollouts/issues/2660)) * **deps:** bump google.golang.org/protobuf from 1.29.1 to 1.30.0 ([#2665](https://github.com/argoproj/argo-rollouts/issues/2665)) * **deps:** bump github.com/stretchr/testify from 1.8.1 to 1.8.2 ([#2627](https://github.com/argoproj/argo-rollouts/issues/2627)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.6 to 1.19.7 ([#2672](https://github.com/argoproj/argo-rollouts/issues/2672)) +* **deps:** 
bump github.com/aws/aws-sdk-go-v2/config from 1.18.14 to 1.18.15 ([#2618](https://github.com/argoproj/argo-rollouts/issues/2618)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.3 to 1.25.4 ([#2617](https://github.com/argoproj/argo-rollouts/issues/2617)) * **deps:** bump github.com/antonmedv/expr from 1.12.0 to 1.12.1 ([#2619](https://github.com/argoproj/argo-rollouts/issues/2619)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.4 to 1.19.5 ([#2616](https://github.com/argoproj/argo-rollouts/issues/2616)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.3 to 1.19.4 ([#2612](https://github.com/argoproj/argo-rollouts/issues/2612)) * **deps:** bump github.com/prometheus/common from 0.39.0 to 0.40.0 ([#2611](https://github.com/argoproj/argo-rollouts/issues/2611)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.13 to 1.18.14 ([#2614](https://github.com/argoproj/argo-rollouts/issues/2614)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.6 to 1.25.7 ([#2682](https://github.com/argoproj/argo-rollouts/issues/2682)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.2 to 1.25.3 ([#2615](https://github.com/argoproj/argo-rollouts/issues/2615)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.14 to 1.18.15 ([#2618](https://github.com/argoproj/argo-rollouts/issues/2618)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 from 1.19.6 to 1.19.7 ([#2672](https://github.com/argoproj/argo-rollouts/issues/2672)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.17 to 1.18.19 ([#2673](https://github.com/argoproj/argo-rollouts/issues/2673)) * **deps:** bump imjasonh/setup-crane from 0.2 to 0.3 ([#2600](https://github.com/argoproj/argo-rollouts/issues/2600)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.25.5 to 1.25.6 
([#2671](https://github.com/argoproj/argo-rollouts/issues/2671)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2593](https://github.com/argoproj/argo-rollouts/issues/2593)) -* **deps:** bump github.com/antonmedv/expr from 1.12.3 to 1.12.5 ([#2670](https://github.com/argoproj/argo-rollouts/issues/2670)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/config from 1.18.15 to 1.18.16 ([#2652](https://github.com/argoproj/argo-rollouts/issues/2652)) * **deps:** bump google.golang.org/grpc from 1.52.3 to 1.53.0 ([#2574](https://github.com/argoproj/argo-rollouts/issues/2574)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2565](https://github.com/argoproj/argo-rollouts/issues/2565)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2564](https://github.com/argoproj/argo-rollouts/issues/2564)) @@ -143,12 +406,12 @@ ### Fix -* update GetTargetGroupMetadata to call DescribeTags in batches ([#2570](https://github.com/argoproj/argo-rollouts/issues/2570)) * switch service selector back to stable on canary service when aborted ([#2540](https://github.com/argoproj/argo-rollouts/issues/2540)) * change log generator to only add CHANGELOG.md ([#2626](https://github.com/argoproj/argo-rollouts/issues/2626)) * Rollback change on service creation with weightless experiments ([#2624](https://github.com/argoproj/argo-rollouts/issues/2624)) * flakey TestWriteBackToInformer test ([#2621](https://github.com/argoproj/argo-rollouts/issues/2621)) * remove outdated ioutil package dependencies ([#2583](https://github.com/argoproj/argo-rollouts/issues/2583)) +* update GetTargetGroupMetadata to call DescribeTags in batches ([#2570](https://github.com/argoproj/argo-rollouts/issues/2570)) * analysis information box [#2530](https://github.com/argoproj/argo-rollouts/issues/2530) ([#2575](https://github.com/argoproj/argo-rollouts/issues/2575)) * support only tls in virtual services ([#2502](https://github.com/argoproj/argo-rollouts/issues/2502)) * 
**analysis:** Nil Pointer Fixes [#2458](https://github.com/argoproj/argo-rollouts/issues/2458) ([#2680](https://github.com/argoproj/argo-rollouts/issues/2680)) @@ -214,21 +477,21 @@ There was an unintentional change in behavior related to service creation with e ### Chore -* Add Yotpo to USERS.md +* add optum to users list ([#2466](https://github.com/argoproj/argo-rollouts/issues/2466)) * upgrade golang to 1.19 ([#2219](https://github.com/argoproj/argo-rollouts/issues/2219)) -* remove deprecated -i for go build ([#2047](https://github.com/argoproj/argo-rollouts/issues/2047)) -* rename the examples/trafffic-management directory to istio ([#2315](https://github.com/argoproj/argo-rollouts/issues/2315)) +* sign container images and checksum assets ([#2334](https://github.com/argoproj/argo-rollouts/issues/2334)) * update stable tag conditionally ([#2480](https://github.com/argoproj/argo-rollouts/issues/2480)) * fix checksum generation ([#2481](https://github.com/argoproj/argo-rollouts/issues/2481)) -* add optum to users list ([#2466](https://github.com/argoproj/argo-rollouts/issues/2466)) +* Add Yotpo to USERS.md * use docker login to sign images ([#2479](https://github.com/argoproj/argo-rollouts/issues/2479)) * use correct image for plugin container ([#2478](https://github.com/argoproj/argo-rollouts/issues/2478)) +* rename the examples/trafffic-management directory to istio ([#2315](https://github.com/argoproj/argo-rollouts/issues/2315)) * Add example for istio-subset-split ([#2318](https://github.com/argoproj/argo-rollouts/issues/2318)) * add deprecation notice for rollout_phase in docs ([#2377](https://github.com/argoproj/argo-rollouts/issues/2377)) ([#2378](https://github.com/argoproj/argo-rollouts/issues/2378)) -* sign container images and checksum assets ([#2334](https://github.com/argoproj/argo-rollouts/issues/2334)) +* remove deprecated -i for go build ([#2047](https://github.com/argoproj/argo-rollouts/issues/2047)) * **cli:** add darwin arm64 to build and 
release ([#2264](https://github.com/argoproj/argo-rollouts/issues/2264)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch ([#2487](https://github.com/argoproj/argo-rollouts/issues/2487)) -* **deps:** bump github.com/prometheus/common from 0.37.0 to 0.38.0 ([#2468](https://github.com/argoproj/argo-rollouts/issues/2468)) +* **deps:** upgrade ui deps to fix high security cve's ([#2345](https://github.com/argoproj/argo-rollouts/issues/2345)) +* **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.17.0 to 1.17.1 ([#2369](https://github.com/argoproj/argo-rollouts/issues/2369)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch ([#2455](https://github.com/argoproj/argo-rollouts/issues/2455)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2454](https://github.com/argoproj/argo-rollouts/issues/2454)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2452](https://github.com/argoproj/argo-rollouts/issues/2452)) @@ -244,23 +507,23 @@ There was an unintentional change in behavior related to service creation with e * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2413](https://github.com/argoproj/argo-rollouts/issues/2413)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2412](https://github.com/argoproj/argo-rollouts/issues/2412)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2409](https://github.com/argoproj/argo-rollouts/issues/2409)) -* **deps:** bump github.com/prometheus/client_golang ([#2469](https://github.com/argoproj/argo-rollouts/issues/2469)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2406](https://github.com/argoproj/argo-rollouts/issues/2406)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch ([#2404](https://github.com/argoproj/argo-rollouts/issues/2404)) -* **deps:** bump notification engine ([#2470](https://github.com/argoproj/argo-rollouts/issues/2470)) +* **deps:** bump 
github.com/prometheus/client_golang ([#2469](https://github.com/argoproj/argo-rollouts/issues/2469)) * **deps:** bump codecov/codecov-action from 2.1.0 to 3.1.1 ([#2251](https://github.com/argoproj/argo-rollouts/issues/2251)) +* **deps:** bump notification engine ([#2470](https://github.com/argoproj/argo-rollouts/issues/2470)) * **deps:** bump github.com/prometheus/common from 0.38.0 to 0.39.0 ([#2476](https://github.com/argoproj/argo-rollouts/issues/2476)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch ([#2477](https://github.com/argoproj/argo-rollouts/issues/2477)) -* **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.17.2 to 1.17.3 ([#2484](https://github.com/argoproj/argo-rollouts/issues/2484)) * **deps:** bump dependabot/fetch-metadata from 1.3.4 to 1.3.5 ([#2390](https://github.com/argoproj/argo-rollouts/issues/2390)) * **deps:** bump imjasonh/setup-crane from 0.1 to 0.2 ([#2387](https://github.com/argoproj/argo-rollouts/issues/2387)) -* **deps:** upgrade ui deps to fix high security cve's ([#2345](https://github.com/argoproj/argo-rollouts/issues/2345)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2406](https://github.com/argoproj/argo-rollouts/issues/2406)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch ([#2487](https://github.com/argoproj/argo-rollouts/issues/2487)) +* **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.17.2 to 1.17.3 ([#2484](https://github.com/argoproj/argo-rollouts/issues/2484)) * **deps:** bump actions/upload-artifact from 2 to 3 ([#1973](https://github.com/argoproj/argo-rollouts/issues/1973)) * **deps:** bump github.com/influxdata/influxdb-client-go/v2 ([#2381](https://github.com/argoproj/argo-rollouts/issues/2381)) * **deps:** bump github.com/spf13/cobra from 1.6.0 to 1.6.1 ([#2370](https://github.com/argoproj/argo-rollouts/issues/2370)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/cloudwatch 
([#2366](https://github.com/argoproj/argo-rollouts/issues/2366)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2367](https://github.com/argoproj/argo-rollouts/issues/2367)) -* **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.17.0 to 1.17.1 ([#2369](https://github.com/argoproj/argo-rollouts/issues/2369)) +* **deps:** bump github.com/prometheus/common from 0.37.0 to 0.38.0 ([#2468](https://github.com/argoproj/argo-rollouts/issues/2468)) * **deps:** bump github.com/stretchr/testify from 1.8.0 to 1.8.1 ([#2368](https://github.com/argoproj/argo-rollouts/issues/2368)) * **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2365](https://github.com/argoproj/argo-rollouts/issues/2365)) * **deps:** bump github.com/aws/aws-sdk-go-v2 from 1.16.16 to 1.17.0 ([#2364](https://github.com/argoproj/argo-rollouts/issues/2364)) @@ -283,7 +546,7 @@ There was an unintentional change in behavior related to service creation with e * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2294](https://github.com/argoproj/argo-rollouts/issues/2294)) * **deps:** bump google.golang.org/grpc from 1.47.0 to 1.50.0 ([#2293](https://github.com/argoproj/argo-rollouts/issues/2293)) * **deps:** bump docker/metadata-action from 3 to 4 ([#2292](https://github.com/argoproj/argo-rollouts/issues/2292)) -* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2486](https://github.com/argoproj/argo-rollouts/issues/2486)) +* **deps:** bump github/codeql-action from 1 to 2 ([#2289](https://github.com/argoproj/argo-rollouts/issues/2289)) * **deps:** bump docker/login-action from 1 to 2 ([#2288](https://github.com/argoproj/argo-rollouts/issues/2288)) * **deps:** bump actions/setup-go from 2 to 3 ([#2287](https://github.com/argoproj/argo-rollouts/issues/2287)) * **deps:** bump dependabot/fetch-metadata from 1.3.3 to 1.3.4 ([#2286](https://github.com/argoproj/argo-rollouts/issues/2286)) @@ -292,8 +555,8 @@ There was an unintentional change in behavior 
related to service creation with e * **deps:** bump actions/cache from 2 to 3.0.1 ([#1940](https://github.com/argoproj/argo-rollouts/issues/1940)) * **deps:** bump docker/setup-qemu-action from 1 to 2 ([#2284](https://github.com/argoproj/argo-rollouts/issues/2284)) * **deps:** bump actions/checkout from 2 to 3.1.0 ([#2283](https://github.com/argoproj/argo-rollouts/issues/2283)) +* **deps:** bump github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 ([#2486](https://github.com/argoproj/argo-rollouts/issues/2486)) * **deps:** bump github.com/aws/aws-sdk-go-v2/config ([#2485](https://github.com/argoproj/argo-rollouts/issues/2485)) -* **deps:** bump github/codeql-action from 1 to 2 ([#2289](https://github.com/argoproj/argo-rollouts/issues/2289)) ### Ci @@ -312,23 +575,23 @@ There was an unintentional change in behavior related to service creation with e ### Docs -* Add traffic router support to readme ([#2444](https://github.com/argoproj/argo-rollouts/issues/2444)) -* fix typo in helm Argo rollouts ([#2442](https://github.com/argoproj/argo-rollouts/issues/2442)) +* common questions for Rollbacks ([#2027](https://github.com/argoproj/argo-rollouts/issues/2027)) * correct syntax of canary setMirrorRoute's value ([#2431](https://github.com/argoproj/argo-rollouts/issues/2431)) -* Explain upgrade process ([#2424](https://github.com/argoproj/argo-rollouts/issues/2424)) +* add artifact badge ([#2331](https://github.com/argoproj/argo-rollouts/issues/2331)) +* Use new Google Analytics 4 site tag ([#2299](https://github.com/argoproj/argo-rollouts/issues/2299)) * add progressive delivery with gitops example for openshift ([#2400](https://github.com/argoproj/argo-rollouts/issues/2400)) * fix !important block typo ([#2372](https://github.com/argoproj/argo-rollouts/issues/2372)) * mention supported versions ([#2163](https://github.com/argoproj/argo-rollouts/issues/2163)) * Added blog post for minimize impact in Kubernetes using Progressive Delivery and customer side impact 
([#2355](https://github.com/argoproj/argo-rollouts/issues/2355)) -* Update docs for new openapi kustomize support ([#2216](https://github.com/argoproj/argo-rollouts/issues/2216)) -* add artifact badge ([#2331](https://github.com/argoproj/argo-rollouts/issues/2331)) -* Use new Google Analytics 4 site tag ([#2299](https://github.com/argoproj/argo-rollouts/issues/2299)) +* add Opensurvey to USERS.md ([#2195](https://github.com/argoproj/argo-rollouts/issues/2195)) +* fix typo in helm Argo rollouts ([#2442](https://github.com/argoproj/argo-rollouts/issues/2442)) +* Explain upgrade process ([#2424](https://github.com/argoproj/argo-rollouts/issues/2424)) * Fixed read the docs rendering ([#2277](https://github.com/argoproj/argo-rollouts/issues/2277)) -* common questions for Rollbacks ([#2027](https://github.com/argoproj/argo-rollouts/issues/2027)) +* Add traffic router support to readme ([#2444](https://github.com/argoproj/argo-rollouts/issues/2444)) * add OpsVerse as an official user (USERS.md) ([#2209](https://github.com/argoproj/argo-rollouts/issues/2209)) * Fix the controller annotation to enable data scrapping ([#2238](https://github.com/argoproj/argo-rollouts/issues/2238)) * Update release docs for versioned formula ([#2245](https://github.com/argoproj/argo-rollouts/issues/2245)) -* add Opensurvey to USERS.md ([#2195](https://github.com/argoproj/argo-rollouts/issues/2195)) +* Update docs for new openapi kustomize support ([#2216](https://github.com/argoproj/argo-rollouts/issues/2216)) * **trafficrouting:** fix docs warning to github style markdown ([#2342](https://github.com/argoproj/argo-rollouts/issues/2342)) ### Feat @@ -336,30 +599,30 @@ There was an unintentional change in behavior related to service creation with e * Implement Issue [#1779](https://github.com/argoproj/argo-rollouts/issues/1779): add rollout.Spec.Strategy.Canary.MinPodsPerReplicaSet ([#2448](https://github.com/argoproj/argo-rollouts/issues/2448)) * Apache APISIX support. 
Fixes [#2395](https://github.com/argoproj/argo-rollouts/issues/2395) ([#2437](https://github.com/argoproj/argo-rollouts/issues/2437)) * rollback windows. Fixes [#574](https://github.com/argoproj/argo-rollouts/issues/574) ([#2394](https://github.com/argoproj/argo-rollouts/issues/2394)) -* Support TCP routes traffic splitting for Istio VirtualService ([#1659](https://github.com/argoproj/argo-rollouts/issues/1659)) * add support for getting the replicaset name via templating ([#2396](https://github.com/argoproj/argo-rollouts/issues/2396)) * Allow Traffic shaping through header based routing for ALB ([#2214](https://github.com/argoproj/argo-rollouts/issues/2214)) * Add support for spec.ingressClassName ([#2178](https://github.com/argoproj/argo-rollouts/issues/2178)) +* Support TCP routes traffic splitting for Istio VirtualService ([#1659](https://github.com/argoproj/argo-rollouts/issues/1659)) * **cli:** dynamic shell completion for main resources names (rollouts, experiments, analysisrun) ([#2379](https://github.com/argoproj/argo-rollouts/issues/2379)) * **cli:** add port flag for dashboard command ([#2383](https://github.com/argoproj/argo-rollouts/issues/2383)) * **controller:** don't hardcode experiment ports; always create service ([#2397](https://github.com/argoproj/argo-rollouts/issues/2397)) ### Fix -* set gopath in makefile ([#2398](https://github.com/argoproj/argo-rollouts/issues/2398)) * dev build can set DEV_IMAGE=true ([#2440](https://github.com/argoproj/argo-rollouts/issues/2440)) * add patch verb to deployment resource ([#2407](https://github.com/argoproj/argo-rollouts/issues/2407)) * rootPath support so that it uses the embedded files system ([#2198](https://github.com/argoproj/argo-rollouts/issues/2198)) +* set gopath in makefile ([#2398](https://github.com/argoproj/argo-rollouts/issues/2398)) * change completed condition so it only triggers on pod hash changes also adds an event for when it does changes. 
([#2203](https://github.com/argoproj/argo-rollouts/issues/2203)) * enable notifications without when condition ([#2231](https://github.com/argoproj/argo-rollouts/issues/2231)) * UI not redirecting on / ([#2252](https://github.com/argoproj/argo-rollouts/issues/2252)) * nil pointer while linting with basic canary and ingresses ([#2256](https://github.com/argoproj/argo-rollouts/issues/2256)) -* **analysis:** Fix Analysis Terminal Decision For Dry-Run Metrics ([#2399](https://github.com/argoproj/argo-rollouts/issues/2399)) * **analysis:** Make AR End When Only Dry-Run Metrics Are Defined ([#2230](https://github.com/argoproj/argo-rollouts/issues/2230)) +* **analysis:** Fix Analysis Terminal Decision For Dry-Run Metrics ([#2399](https://github.com/argoproj/argo-rollouts/issues/2399)) * **analysis:** Avoid Infinite Error Message Append For Failed Dry-Run Metrics ([#2182](https://github.com/argoproj/argo-rollouts/issues/2182)) * **cli:** nil pointer while linting ([#2324](https://github.com/argoproj/argo-rollouts/issues/2324)) -* **controller:** leader election preventing two controllers running and gracefully shutting down ([#2291](https://github.com/argoproj/argo-rollouts/issues/2291)) * **controller:** Fix k8s clientset controller metrics. Fixes [#2139](https://github.com/argoproj/argo-rollouts/issues/2139) ([#2261](https://github.com/argoproj/argo-rollouts/issues/2261)) +* **controller:** leader election preventing two controllers running and gracefully shutting down ([#2291](https://github.com/argoproj/argo-rollouts/issues/2291)) * **dashboard:** correct mime type is returned. 
Fixes: [#2290](https://github.com/argoproj/argo-rollouts/issues/2290) ([#2303](https://github.com/argoproj/argo-rollouts/issues/2303)) * **example:** correct docs when metrics got result empty ([#2309](https://github.com/argoproj/argo-rollouts/issues/2309)) * **metricprovider:** Support jsonBody for web metric provider Fixes [#2275](https://github.com/argoproj/argo-rollouts/issues/2275) ([#2312](https://github.com/argoproj/argo-rollouts/issues/2312)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..e445d367af --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +See [docs/CONTRIBUTING.md](docs/CONTRIBUTING.md). diff --git a/Dockerfile b/Dockerfile index 096b150517..4998a43454 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ # Initial stage which pulls prepares build dependencies and CLI tooling we need for our final image # Also used as the image in CI jobs so needs all dependencies #################################################################################################### -FROM --platform=$BUILDPLATFORM golang:1.20 as builder +FROM --platform=$BUILDPLATFORM golang:1.21 as builder RUN apt-get update && apt-get install -y \ wget \ @@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Install golangci-lint -RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.3 && \ +RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.57.2 && \ golangci-lint linters COPY .golangci.yml ${GOPATH}/src/dummy/.golangci.yml @@ -40,7 +40,7 @@ RUN NODE_ENV='production' yarn build #################################################################################################### # Rollout Controller Build stage which performs the actual build of argo-rollouts binaries 
#################################################################################################### -FROM --platform=$BUILDPLATFORM golang:1.20 as argo-rollouts-build +FROM --platform=$BUILDPLATFORM golang:1.21 as argo-rollouts-build WORKDIR /go/src/github.com/argoproj/argo-rollouts diff --git a/Makefile b/Makefile index 638498ff85..a9216aa4ff 100644 --- a/Makefile +++ b/Makefile @@ -20,9 +20,9 @@ DEV_IMAGE ?= false # E2E variables E2E_INSTANCE_ID ?= argo-rollouts-e2e -E2E_TEST_OPTIONS ?= +E2E_TEST_OPTIONS ?= E2E_PARALLEL ?= 1 -E2E_WAIT_TIMEOUT ?= 120 +E2E_WAIT_TIMEOUT ?= 90 GOPATH ?= $(shell go env GOPATH) override LDFLAGS += \ @@ -111,7 +111,7 @@ gen-proto: k8s-proto api-proto ui-proto # generates the .proto files affected by changes to types.go .PHONY: k8s-proto k8s-proto: go-mod-vendor $(TYPES) ## generate kubernetes protobuf files - PATH=${DIST_DIR}:$$PATH go-to-protobuf \ + PATH=${DIST_DIR}:$$PATH GOPATH=${GOPATH} go-to-protobuf \ --go-header-file=./hack/custom-boilerplate.go.txt \ --packages=github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1 \ --apimachinery-packages=${APIMACHINERY_PKGS} \ @@ -149,7 +149,7 @@ gen-mocks: install-go-tools-local ## generate mock files # generates openapi_generated.go .PHONY: gen-openapi gen-openapi: $(DIST_DIR)/openapi-gen ## generate openapi files - PATH=${DIST_DIR}:$$PATH openapi-gen \ + PATH=${DIST_DIR}:$$PATH GOPATH=${GOPATH} openapi-gen \ --go-header-file ${CURRENT_DIR}/hack/custom-boilerplate.go.txt \ --input-dirs github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1 \ --output-package github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1 \ diff --git a/README.md b/README.md index 977a35eaa6..dc6c87d023 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ [![codecov](https://codecov.io/gh/argoproj/argo-rollouts/branch/master/graph/badge.svg)](https://codecov.io/gh/argoproj/argo-rollouts) 
[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3834/badge)](https://bestpractices.coreinfrastructure.org/projects/3834) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-rollouts/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-rollouts) [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-rollouts)](https://artifacthub.io/packages/helm/argo/argo-rollouts) ## What is Argo Rollouts? @@ -48,20 +49,24 @@ For these reasons, in large scale high-volume production environments, a rolling * Metric provider integration: Prometheus, Wavefront, Kayenta, Web, Kubernetes Jobs, Datadog, New Relic, InfluxDB ## Supported Traffic Shaping Integrations -| Traffic Shaping Integration | SetWeight | SetWeightExperiments | SetMirror | SetHeader | -|-----------------------------------|------------------------------|-----------------------------|----------------------------|----------------------------| -| ALB Ingress Controller | :white_check_mark: (stable) | :white_check_mark: (stable) | :x: | :white_check_mark: (alpha) | -| Ambassador | :white_check_mark: (stable) | :x: | :x: | :x: | -| Apache APISIX Ingress Controller | :white_check_mark: (alpha) | :x: | :x: | :white_check_mark: (alpha) | -| Istio | :white_check_mark: (stable) | :white_check_mark: (stable) | :white_check_mark: (alpha) | :white_check_mark: (alpha) | -| Nginx Ingress Controller | :white_check_mark: (stable) | :x: | :x: | :x: | -| SMI | :white_check_mark: (stable) | :white_check_mark: (stable) | :x: | :x: | -| Traefik | :white_check_mark: (beta) | :x: | :x: | :x: | +| Traffic Shaping Integration | SetWeight | SetWeightExperiments | SetMirror | SetHeader | Implemented As Plugin | 
+|-----------------------------------|------------------------------|-----------------------------|----------------------------|----------------------------|-----------------------------| +| ALB Ingress Controller | :white_check_mark: (stable) | :white_check_mark: (stable) | :x: | :white_check_mark: (alpha) | | +| Ambassador | :white_check_mark: (stable) | :x: | :x: | :x: | | +| Apache APISIX Ingress Controller | :white_check_mark: (alpha) | :x: | :x: | :white_check_mark: (alpha) | | +| Istio | :white_check_mark: (stable) | :white_check_mark: (stable) | :white_check_mark: (alpha) | :white_check_mark: (alpha) | | +| Nginx Ingress Controller | :white_check_mark: (stable) | :x: | :x: | :x: | | +| SMI | :white_check_mark: (stable) | :white_check_mark: (stable) | :x: | :x: | | +| Traefik | :white_check_mark: (beta) | :x: | :x: | :x: | | +| Contour | :white_check_mark: (beta) | :x: | :x: | :x: | :heavy_check_mark: | +| Gateway API | :white_check_mark: (alpha) | :x: | :x: | :x: | :heavy_check_mark: | :white_check_mark: = Supported :x: = Not Supported +:heavy_check_mark: = Yes + ## Documentation To learn more about Argo Rollouts go to the [complete documentation](https://argo-rollouts.readthedocs.io/en/stable/). 
@@ -96,3 +101,6 @@ You can reach the Argo Rollouts community and developers via the following chann * [How Scalable is Argo-Rollouts: A Cloud Operator’s Perspective](https://www.youtube.com/watch?v=rCEhxJ2NSTI) * [Minimize Impact in Kubernetes Using Argo Rollouts](https://medium.com/@arielsimhon/minimize-impact-in-kubernetes-using-argo-rollouts-992fb9519969) * [Progressive Application Delivery with GitOps on Red Hat OpenShift](https://www.youtube.com/watch?v=DfeL7cdTx4c) +* [Progressive delivery for Kubernetes Config Maps using Argo Rollouts](https://codefresh.io/blog/progressive-delivery-for-kubernetes-config-maps-using-argo-rollouts/) +* [Multi-Service Progressive Delivery with Argo Rollouts](https://codefresh.io/blog/multi-service-progressive-delivery-with-argo-rollouts/) +* [Progressive Delivery for Stateful Services Using Argo Rollouts](https://codefresh.io/blog/progressive-delivery-for-stateful-services-using-argo-rollouts/) diff --git a/USERS.md b/USERS.md index 587548a806..fc8217db6a 100644 --- a/USERS.md +++ b/USERS.md @@ -1,6 +1,7 @@ ## Who uses Argo Rollouts? Organizations below are **officially** using Argo Rollouts. Please send a PR with your organization name if you are using Argo Rollouts. +1. [Ada](https://www.ada.cx) 1. [ADP](https://www.adp.com) 1. [Akuity](https://akuity.io/) 1. [Alibaba Group](https://www.alibabagroup.com/) @@ -9,15 +10,18 @@ Organizations below are **officially** using Argo Rollouts. Please send a PR wit 1. [Bucketplace](https://www.bucketplace.co.kr/) 1. [BukuKas](https://bukukas.co.id/) 1. [Calm](https://www.calm.com/) +1. [CircleCI](https://circleci.com/) 1. [Codefresh](https://codefresh.io/) 1. [Credit Karma](https://creditkarma.com/) 1. [DaoCloud](https://daocloud.io) 1. [Databricks](https://github.com/databricks) 1. [Devtron Labs](https://github.com/devtron-labs/devtron) +1. [Factorial](https://factorialhr.com) 1. [Farfetch](https://www.farfetch.com/) 1. [Flipkart](https://flipkart.com) 1. 
[GetYourGuide](https://www.getyourguide.com) 1. [Gllue](https://gllue.com) +1. [HashiCorp](https://www.hashicorp.com/) 1. [Ibotta](https://home.ibotta.com/) 1. [Intuit](https://www.intuit.com/) 1. [New Relic](https://newrelic.com/) @@ -44,5 +48,7 @@ Organizations below are **officially** using Argo Rollouts. Please send a PR wit 1. [Twilio SendGrid](https://sendgrid.com) 1. [Ubie](https://ubie.life/) 1. [VISITS Technologies](https://visits.world/en) +1. [WeLab Bank](https://www.welab.bank/) 1. [Yotpo](https://www.yotpo.com/) 1. [VGS](https://www.vgs.io) +2. [Groww](https://groww.in/) diff --git a/VERSION b/VERSION index 40bdab01a0..14e453023c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.6.1-CR-23199 +v1.7.1-CR-24605 \ No newline at end of file diff --git a/analysis/analysis.go b/analysis/analysis.go index fb307e62e0..97497c4c4d 100644 --- a/analysis/analysis.go +++ b/analysis/analysis.go @@ -9,6 +9,7 @@ import ( log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" @@ -40,10 +41,15 @@ type metricTask struct { } func (c *Controller) reconcileAnalysisRun(origRun *v1alpha1.AnalysisRun) *v1alpha1.AnalysisRun { + logger := logutil.WithAnalysisRun(origRun) if origRun.Status.Phase.Completed() { + err := c.maybeGarbageCollectAnalysisRun(origRun, logger) + if err != nil { + // TODO(jessesuen): surface errors to controller so they can be retried + logger.Warnf("Failed to garbage collect analysis run: %v", err) + } return origRun } - logger := logutil.WithAnalysisRun(origRun) run := origRun.DeepCopy() if run.Status.MetricResults == nil { @@ -108,6 +114,10 @@ func (c *Controller) reconcileAnalysisRun(origRun *v1alpha1.AnalysisRun) *v1alph run.Status.Message = newMessage if newStatus.Completed() { c.recordAnalysisRunCompletionEvent(run) + if run.Status.CompletedAt == nil { + now := timeutil.MetaNow() + run.Status.CompletedAt = &now + } } } @@ -752,3 
+762,40 @@ func (c *Controller) garbageCollectMeasurements(run *v1alpha1.AnalysisRun, measu } return nil } + +func (c *Controller) maybeGarbageCollectAnalysisRun(run *v1alpha1.AnalysisRun, logger *log.Entry) error { + ctx := context.TODO() + if run.DeletionTimestamp != nil || !isAnalysisRunTtlExceeded(run) { + return nil + } + logger.Infof("Trying to cleanup TTL exceeded analysis run") + err := c.argoProjClientset.ArgoprojV1alpha1().AnalysisRuns(run.Namespace).Delete(ctx, run.Name, metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return err + } + return nil +} + +func isAnalysisRunTtlExceeded(run *v1alpha1.AnalysisRun) bool { + // TTL only counted for completed runs with TTL strategy. + if !run.Status.Phase.Completed() || run.Spec.TTLStrategy == nil { + return false + } + // Cannot determine TTL if run has no completion time. + if run.Status.CompletedAt == nil { + return false + } + secondsCompleted := timeutil.MetaNow().Sub(run.Status.CompletedAt.Time).Seconds() + var ttlSeconds *int32 + if run.Status.Phase == v1alpha1.AnalysisPhaseSuccessful && run.Spec.TTLStrategy.SecondsAfterSuccess != nil { + ttlSeconds = run.Spec.TTLStrategy.SecondsAfterSuccess + } else if run.Status.Phase == v1alpha1.AnalysisPhaseFailed && run.Spec.TTLStrategy.SecondsAfterFailure != nil { + ttlSeconds = run.Spec.TTLStrategy.SecondsAfterFailure + } else if run.Spec.TTLStrategy.SecondsAfterCompletion != nil { + ttlSeconds = run.Spec.TTLStrategy.SecondsAfterCompletion + } + if ttlSeconds == nil { + return false + } + return int32(secondsCompleted) > *ttlSeconds +} diff --git a/analysis/analysis_test.go b/analysis/analysis_test.go index cec3446b89..9e7d3fe5f8 100644 --- a/analysis/analysis_test.go +++ b/analysis/analysis_test.go @@ -3,6 +3,7 @@ package analysis import ( "bytes" "context" + "errors" "fmt" "strings" "testing" @@ -14,12 +15,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" + k8serrors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sschema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + k8stesting "k8s.io/client-go/testing" "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" + logutil "github.com/argoproj/argo-rollouts/utils/log" ) func timePtr(t metav1.Time) *metav1.Time { @@ -1985,3 +1992,412 @@ func TestInvalidMeasurementsRetentionConfigThrowsError(t *testing.T) { assert.Equal(t, v1alpha1.AnalysisPhaseError, newRun.Status.Phase) assert.Equal(t, "Analysis spec invalid: measurementRetention[0]: Rule didn't match any metric name(s)", newRun.Status.Message) } + +func TestExceededTtlChecked(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + testTTLStrategy := func( + t *testing.T, + ttlStrategy *v1alpha1.TTLStrategy, + expiredStatus *v1alpha1.AnalysisRunStatus, + notExpiredStatus *v1alpha1.AnalysisRunStatus) { + testId := string(uuid.NewUUID()) + ttlExpiredRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "expired-run" + testId, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisRunSpec{ + TTLStrategy: ttlStrategy, + }, + Status: *expiredStatus, + } + _ = c.reconcileAnalysisRun(ttlExpiredRun) + if notExpiredStatus != nil { + ttlNotExpiredRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-expired-run" + testId, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisRunSpec{ + TTLStrategy: ttlStrategy, + }, + Status: *notExpiredStatus, + } + _ = c.reconcileAnalysisRun(ttlNotExpiredRun) + } + + pi := f.expectDeleteAnalysisRunAction(ttlExpiredRun) + assert.Equal(t, fmt.Sprintf("%s/%s", metav1.NamespaceDefault, "expired-run"+testId), f.getDeletedAnalysisRunNamespaceAndName(pi)) + // Nothing else 
is deleted + assert.Equal(t, 1, len(filterInformerActions(f.client.Actions()))) + // Clear actions to avoid affecting other test instances. + f.client.ClearActions() + f.actions = nil + } + + ttlNotExpiredCompletedTime := f.now.Add(-86400 * time.Second) + ttlExpiredCompletedTime := ttlNotExpiredCompletedTime.Add(-1 * time.Second) + secondsOfOneDay := int32(86400) + + // Test completed TTL. + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }) + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }) + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseError, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseError, + }) + // Test successful TTL. + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterSuccess: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }) + // Test failed TTL. 
+ testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterFailure: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }) + + // Test success TTL does not affect failed run. + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterSuccess: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }) + // Test failed TTL does not affect successful run. + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterFailure: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }) + // Test success TTL overrides completed TTL. + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: pointer.Int32Ptr(100000), + SecondsAfterSuccess: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }) + // Test failed TTL overrides completed TTL. 
+ testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: pointer.Int32Ptr(100000), + SecondsAfterFailure: &secondsOfOneDay, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }) + // Test completed TTL still evaluated when non-matching overrides exist. + testTTLStrategy(t, &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: &secondsOfOneDay, + SecondsAfterFailure: pointer.Int32Ptr(86401), + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, &v1alpha1.AnalysisRunStatus{ + CompletedAt: timePtr(metav1.NewTime(ttlNotExpiredCompletedTime)), + Phase: v1alpha1.AnalysisPhaseFailed, + }) +} + +func TestTtlNotGCInProgressAnalysisRun(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + expectedCount := intstr.FromInt(3) + origRun := &v1alpha1.AnalysisRun{ + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "metric1", + Interval: "60s", + Count: &expectedCount, + Provider: v1alpha1.MetricProvider{ + Job: &v1alpha1.JobMetric{}, + }, + }, + }, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseRunning, + StartedAt: timePtr(metav1.NewTime(time.Now())), + MetricResults: []v1alpha1.MetricResult{ + { + Name: "metric1", + Phase: v1alpha1.AnalysisPhaseRunning, + Count: 1, + Measurements: []v1alpha1.Measurement{{ + Value: "1", + Phase: v1alpha1.AnalysisPhaseSuccessful, + StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))), + FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))), + }}, + }, + }, + }, + } + newRun := c.reconcileAnalysisRun(origRun) + assert.Equal(t, v1alpha1.AnalysisPhaseRunning, newRun.Status.Phase) + 
assert.Nil(t, newRun.Status.CompletedAt) + // Nothing else is deleted + assert.Equal(t, 0, len(filterInformerActions(f.client.Actions()))) +} + +func TestCompletedTimeFilled(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + expectedCount := intstr.FromInt(1) + origRun := &v1alpha1.AnalysisRun{ + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: []v1alpha1.Metric{ + { + Name: "metric1", + Interval: "60s", + Count: &expectedCount, + Provider: v1alpha1.MetricProvider{ + Job: &v1alpha1.JobMetric{}, + }, + }, + }, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseRunning, + StartedAt: timePtr(metav1.NewTime(time.Now())), + MetricResults: []v1alpha1.MetricResult{ + { + Name: "metric1", + Phase: v1alpha1.AnalysisPhaseSuccessful, + Count: 1, + Measurements: []v1alpha1.Measurement{{ + Value: "1", + Phase: v1alpha1.AnalysisPhaseSuccessful, + StartedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))), + FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))), + }}, + }, + }, + }, + } + newRun := c.reconcileAnalysisRun(origRun) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, newRun.Status.Phase) + assert.NotNil(t, newRun.Status.CompletedAt) + assert.Equal(t, f.now, newRun.Status.CompletedAt.Time) +} + +func TestReconcileAnalysisRunOnRunNotFound(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + buf := bytes.NewBufferString("") + log.SetOutput(buf) + + // Prepend since there is a default reaction that captures it. 
+ f.client.Fake.PrependReactor("delete", "analysisruns", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, k8serrors.NewNotFound(k8sschema.GroupResource{Resource: "analysisruns"}, "test") + }) + + origRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run" + string(uuid.NewUUID()), + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisRunSpec{ + TTLStrategy: &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: pointer.Int32Ptr(1), + }, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + CompletedAt: timePtr(metav1.NewTime(f.now.Add(-2 * time.Second))), + }, + } + _ = c.reconcileAnalysisRun(origRun) + logMessage := buf.String() + assert.Contains(t, logMessage, "Trying to cleanup TTL exceeded analysis run") + assert.NotContains(t, logMessage, "Failed to garbage collect analysis run") + // One deletion issued. + assert.Len(t, f.client.Fake.Actions(), 1) + assert.Equal(t, "delete", f.client.Fake.Actions()[0].GetVerb()) + assert.Equal(t, "analysisruns", f.client.Fake.Actions()[0].GetResource().Resource) +} + +func TestReconcileAnalysisRunOnOtherRunErrors(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + buf := bytes.NewBufferString("") + log.SetOutput(buf) + + // Prepend since there is a default reaction that captures it. 
+ f.client.Fake.PrependReactor("delete", "analysisruns", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("some error") + }) + + origRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run" + string(uuid.NewUUID()), + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisRunSpec{ + TTLStrategy: &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: pointer.Int32Ptr(1), + }, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + CompletedAt: timePtr(metav1.NewTime(f.now.Add(-2 * time.Second))), + }, + } + _ = c.reconcileAnalysisRun(origRun) + logMessage := buf.String() + assert.Contains(t, logMessage, "Failed to garbage collect analysis run") + // One deletion issued. + assert.Len(t, f.client.Fake.Actions(), 1) + assert.Equal(t, "delete", f.client.Fake.Actions()[0].GetVerb()) + assert.Equal(t, "analysisruns", f.client.Fake.Actions()[0].GetResource().Resource) +} + +func TestMaybeGarbageCollectAnalysisRunNoGCIfNotCompleted(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + origRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run" + string(uuid.NewUUID()), + Namespace: metav1.NamespaceDefault, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseRunning, + }, + } + logger := logutil.WithAnalysisRun(origRun) + err := c.maybeGarbageCollectAnalysisRun(origRun, logger) + // No error, no deletion issued. 
+ assert.NoError(t, err) + assert.Empty(t, f.client.Fake.Actions()) +} + +func TestMaybeGarbageCollectAnalysisRunNoGCIfNoTTLStrategy(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + origRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run" + string(uuid.NewUUID()), + Namespace: metav1.NamespaceDefault, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, + } + logger := logutil.WithAnalysisRun(origRun) + err := c.maybeGarbageCollectAnalysisRun(origRun, logger) + // No error, no deletion issued. + assert.NoError(t, err) + assert.Empty(t, f.client.Fake.Actions()) +} + +func TestMaybeGarbageCollectAnalysisRunNoGCIfWithDeletionTimestamp(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + origRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run" + string(uuid.NewUUID()), + Namespace: metav1.NamespaceDefault, + DeletionTimestamp: timePtr(metav1.NewTime(f.now)), + }, + Spec: v1alpha1.AnalysisRunSpec{ + TTLStrategy: &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: pointer.Int32Ptr(1), + }, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + CompletedAt: timePtr(metav1.NewTime(f.now.Add(-2 * time.Second))), + }, + } + logger := logutil.WithAnalysisRun(origRun) + err := c.maybeGarbageCollectAnalysisRun(origRun, logger) + // No error, no deletion issued. 
+ assert.NoError(t, err) + assert.Empty(t, f.client.Fake.Actions()) +} + +func TestMaybeGarbageCollectAnalysisRunNoGCIfNoCompletedAt(t *testing.T) { + f := newFixture(t) + defer f.Close() + c, _, _ := f.newController(noResyncPeriodFunc) + + origRun := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run" + string(uuid.NewUUID()), + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisRunSpec{ + TTLStrategy: &v1alpha1.TTLStrategy{ + SecondsAfterCompletion: pointer.Int32Ptr(1), + }, + }, + Status: v1alpha1.AnalysisRunStatus{ + Phase: v1alpha1.AnalysisPhaseSuccessful, + }, + } + logger := logutil.WithAnalysisRun(origRun) + err := c.maybeGarbageCollectAnalysisRun(origRun, logger) + // No error, no deletion issued. + assert.NoError(t, err) + assert.Empty(t, f.client.Fake.Actions()) +} diff --git a/analysis/controller.go b/analysis/controller.go index 505e6f005c..1afa6ab556 100644 --- a/analysis/controller.go +++ b/analysis/controller.go @@ -6,6 +6,10 @@ import ( "time" "github.com/argoproj/argo-rollouts/metric" + jobProvider "github.com/argoproj/argo-rollouts/metricproviders/job" + "github.com/aws/smithy-go/ptr" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" unstructuredutil "github.com/argoproj/argo-rollouts/utils/unstructured" @@ -31,6 +35,10 @@ import ( timeutil "github.com/argoproj/argo-rollouts/utils/time" ) +var ( + analysisRunGVK = v1alpha1.SchemeGroupVersion.WithKind("AnalysisRun") +) + // Controller is the controller implementation for Analysis resources type Controller struct { // kubeclientset is a standard kubernetes clientset @@ -49,8 +57,8 @@ type Controller struct { newProvider func(logCtx log.Entry, metric v1alpha1.Metric) (metric.Provider, error) // used for unit testing - enqueueAnalysis func(obj interface{}) - enqueueAnalysisAfter func(obj interface{}, duration time.Duration) + enqueueAnalysis func(obj any) + enqueueAnalysisAfter func(obj any, duration time.Duration) // workqueue is a rate 
limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This @@ -91,10 +99,10 @@ func NewController(cfg ControllerConfig) *Controller { resyncPeriod: cfg.ResyncPeriod, } - controller.enqueueAnalysis = func(obj interface{}) { + controller.enqueueAnalysis = func(obj any) { controllerutil.Enqueue(obj, cfg.AnalysisRunWorkQueue) } - controller.enqueueAnalysisAfter = func(obj interface{}, duration time.Duration) { + controller.enqueueAnalysisAfter = func(obj any, duration time.Duration) { controllerutil.EnqueueAfter(obj, duration, cfg.AnalysisRunWorkQueue) } @@ -105,14 +113,14 @@ func NewController(cfg ControllerConfig) *Controller { controller.newProvider = providerFactory.NewProvider cfg.JobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - controller.enqueueIfCompleted(obj) + AddFunc: func(obj any) { + controller.enqueueJobIfCompleted(obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { - controller.enqueueIfCompleted(newObj) + UpdateFunc: func(oldObj, newObj any) { + controller.enqueueJobIfCompleted(newObj) }, - DeleteFunc: func(obj interface{}) { - controller.enqueueIfCompleted(obj) + DeleteFunc: func(obj any) { + controller.enqueueJobIfCompleted(obj) }, }) @@ -120,10 +128,10 @@ func NewController(cfg ControllerConfig) *Controller { // Set up an event handler for when analysis resources change cfg.AnalysisRunInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueAnalysis, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { controller.enqueueAnalysis(new) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controller.enqueueAnalysis(obj) if ar := unstructuredutil.ObjectToAnalysisRun(obj); ar != nil { logCtx := logutil.WithAnalysisRun(ar) @@ -186,7 +194,35 @@ func (c *Controller) syncHandler(ctx context.Context, key string) error { return 
c.persistAnalysisRunStatus(run, newRun.Status) } -func (c *Controller) enqueueIfCompleted(obj interface{}) { +func (c *Controller) jobParentReference(obj any) (*v1.OwnerReference, string) { + job, ok := obj.(*batchv1.Job) + if !ok { + return nil, "" + } + // if it has owner reference, return it as is + ownerRef := v1.GetControllerOf(job) + // else if it's missing owner reference check if analysis run uid is set and + // if it is there use labels/annotations to create owner reference + if ownerRef == nil && job.Labels[jobProvider.AnalysisRunUIDLabelKey] != "" { + ownerRef = &v1.OwnerReference{ + APIVersion: analysisRunGVK.GroupVersion().String(), + Kind: analysisRunGVK.Kind, + Name: job.Annotations[jobProvider.AnalysisRunNameAnnotationKey], + UID: types.UID(job.Labels[jobProvider.AnalysisRunUIDLabelKey]), + BlockOwnerDeletion: ptr.Bool(true), + Controller: ptr.Bool(true), + } + } + ns := job.GetNamespace() + if job.Annotations != nil { + if job.Annotations[jobProvider.AnalysisRunNamespaceAnnotationKey] != "" { + ns = job.Annotations[jobProvider.AnalysisRunNamespaceAnnotationKey] + } + } + return ownerRef, ns +} + +func (c *Controller) enqueueJobIfCompleted(obj any) { job, ok := obj.(*batchv1.Job) if !ok { return @@ -194,7 +230,7 @@ func (c *Controller) enqueueIfCompleted(obj interface{}) { for _, condition := range job.Status.Conditions { switch condition.Type { case batchv1.JobFailed, batchv1.JobComplete: - controllerutil.EnqueueParentObject(job, register.AnalysisRunKind, c.enqueueAnalysis) + controllerutil.EnqueueParentObject(job, register.AnalysisRunKind, c.enqueueAnalysis, c.jobParentReference) return } } diff --git a/analysis/controller_test.go b/analysis/controller_test.go index cde8ba853e..601139a6d4 100644 --- a/analysis/controller_test.go +++ b/analysis/controller_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "reflect" + "sync" "testing" "time" @@ -50,11 +51,18 @@ type fixture struct { // Actions expected to happen on the client. 
actions []core.Action // Objects from here preloaded into NewSimpleFake. - objects []runtime.Object - enqueuedObjects map[string]int - unfreezeTime func() error + objects []runtime.Object + + // Acquire 'enqueuedObjectMutex' before accessing enqueuedObjects + enqueuedObjects map[string]int + enqueuedObjectMutex sync.Mutex + + unfreezeTime func() error // fake provider provider *mocks.Provider + + // Reference to frozen now + now time.Time } func newFixture(t *testing.T) *fixture { @@ -62,12 +70,12 @@ func newFixture(t *testing.T) *fixture { f.t = t f.objects = []runtime.Object{} f.enqueuedObjects = make(map[string]int) - now := time.Now() - timeutil.Now = func() time.Time { - return now - } + f.now = time.Now() + timeutil.SetNowTimeFunc(func() time.Time { + return f.now + }) f.unfreezeTime = func() error { - timeutil.Now = time.Now + timeutil.SetNowTimeFunc(time.Now) return nil } return f @@ -113,12 +121,16 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share Recorder: record.NewFakeEventRecorder(), }) - c.enqueueAnalysis = func(obj interface{}) { + c.enqueueAnalysis = func(obj any) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { panic(err) } + + f.enqueuedObjectMutex.Lock() + defer f.enqueuedObjectMutex.Unlock() + count, ok := f.enqueuedObjects[key] if !ok { count = 0 @@ -127,7 +139,7 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share f.enqueuedObjects[key] = count c.analysisRunWorkQueue.Add(obj) } - c.enqueueAnalysisAfter = func(obj interface{}, duration time.Duration) { + c.enqueueAnalysisAfter = func(obj any, duration time.Duration) { c.enqueueAnalysis(obj) } f.provider = &mocks.Provider{} @@ -286,6 +298,22 @@ func (f *fixture) getPatchedAnalysisRun(index int) v1alpha1.AnalysisRun { //noli return ar } +func (f *fixture) expectDeleteAnalysisRunAction(analysisRun *v1alpha1.AnalysisRun) int { //nolint:unused + action := 
core.NewDeleteAction(schema.GroupVersionResource{Resource: "analysisrun"}, analysisRun.Namespace, analysisRun.Name) + len := len(f.actions) + f.actions = append(f.actions, action) + return len +} + +func (f *fixture) getDeletedAnalysisRunNamespaceAndName(index int) string { //nolint:unused + action := filterInformerActions(f.client.Actions())[index] + deleteAction, ok := action.(core.DeleteAction) + if !ok { + f.t.Fatalf("Expected Patch action, not %s", action.GetVerb()) + } + return fmt.Sprintf("%s/%s", deleteAction.GetNamespace(), deleteAction.GetName()) +} + func TestNoReconcileForNotFoundAnalysisRun(t *testing.T) { f := newFixture(t) defer f.Close() diff --git a/cmd/rollouts-controller/main.go b/cmd/rollouts-controller/main.go index ad7190c585..c8deab3ea0 100644 --- a/cmd/rollouts-controller/main.go +++ b/cmd/rollouts-controller/main.go @@ -6,8 +6,8 @@ import ( "strings" "time" + "github.com/argoproj/argo-rollouts/metricproviders" "github.com/argoproj/argo-rollouts/utils/record" - "github.com/argoproj/pkg/kubeclientmetrics" smiclientset "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/split/clientset/versioned" log "github.com/sirupsen/logrus" @@ -42,8 +42,12 @@ const ( cliName = "argo-rollouts" jsonFormat = "json" textFormat = "text" + + controllerAnalysis = "analysis" ) +var supportedControllers = map[string]bool{controllerAnalysis: true} + func newCommand() *cobra.Command { var ( clientConfig clientcmd.ClientConfig @@ -63,6 +67,8 @@ func newCommand() *cobra.Command { ingressThreads int istioVersion string trafficSplitVersion string + traefikAPIGroup string + traefikVersion string ambassadorVersion string ingressVersion string appmeshCRDVersion string @@ -72,6 +78,7 @@ func newCommand() *cobra.Command { namespaced bool printVersion bool selfServiceNotificationEnabled bool + controllersEnabled []string ) electOpts := controller.NewLeaderElectionOptions() var command = cobra.Command{ @@ -82,11 +89,13 @@ func newCommand() *cobra.Command { 
fmt.Println(version.GetVersion()) return nil } + logger := log.New() setLogLevel(logLevel) if logFormat != "" { log.SetFormatter(createFormatter(logFormat)) + logger.SetFormatter(createFormatter(logFormat)) } - logutil.SetKLogLogger(log.New()) + logutil.SetKLogLogger(logger) logutil.SetKLogLevel(klogLevel) log.WithField("version", version.GetVersion()).Info("Argo Rollouts starting") @@ -98,6 +107,8 @@ func newCommand() *cobra.Command { defaults.SetAmbassadorAPIVersion(ambassadorVersion) defaults.SetSMIAPIVersion(trafficSplitVersion) defaults.SetAppMeshCRDVersion(appmeshCRDVersion) + defaults.SetTraefikAPIGroup(traefikAPIGroup) + defaults.SetTraefikVersion(traefikVersion) config, err := clientConfig.ClientConfig() checkError(err) @@ -123,6 +134,7 @@ func newCommand() *cobra.Command { discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) checkError(err) smiClient, err := smiclientset.NewForConfig(config) + checkError(err) resyncDuration := time.Duration(rolloutResyncPeriod) * time.Second kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( kubeClient, @@ -132,10 +144,17 @@ func newCommand() *cobra.Command { instanceIDTweakListFunc := func(options *metav1.ListOptions) { options.LabelSelector = instanceIDSelector.String() } + jobKubeClient, _, err := metricproviders.GetAnalysisJobClientset(kubeClient) + checkError(err) + jobNs := metricproviders.GetAnalysisJobNamespace() + if jobNs == "" { + // if not set explicitly use the configured ns + jobNs = namespace + } jobInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( - kubeClient, + jobKubeClient, resyncDuration, - kubeinformers.WithNamespace(namespace), + kubeinformers.WithNamespace(jobNs), kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) { options.LabelSelector = jobprovider.AnalysisRunUIDLabelKey })) @@ -185,41 +204,67 @@ func newCommand() *cobra.Command { ingressWrapper, err := ingressutil.NewIngressWrapper(mode, kubeClient, 
kubeInformerFactory) checkError(err) - cm := controller.NewManager( - namespace, - kubeClient, - argoprojClient, - dynamicClient, - smiClient, - discoveryClient, - kubeInformerFactory.Apps().V1().ReplicaSets(), - kubeInformerFactory.Core().V1().Services(), - ingressWrapper, - jobInformerFactory.Batch().V1().Jobs(), - tolerantinformer.NewTolerantRolloutInformer(dynamicInformerFactory), - tolerantinformer.NewTolerantExperimentInformer(dynamicInformerFactory), - tolerantinformer.NewTolerantAnalysisRunInformer(dynamicInformerFactory), - tolerantinformer.NewTolerantAnalysisTemplateInformer(dynamicInformerFactory), - tolerantinformer.NewTolerantClusterAnalysisTemplateInformer(clusterDynamicInformerFactory), - istioPrimaryDynamicClient, - istioDynamicInformerFactory.ForResource(istioutil.GetIstioVirtualServiceGVR()).Informer(), - istioDynamicInformerFactory.ForResource(istioutil.GetIstioDestinationRuleGVR()).Informer(), - notificationConfigMapInformerFactory, - notificationSecretInformerFactory, - resyncDuration, - instanceID, - metricsPort, - healthzPort, - k8sRequestProvider, - nginxIngressClasses, - albIngressClasses, - dynamicInformerFactory, - clusterDynamicInformerFactory, - istioDynamicInformerFactory, - namespaced, - kubeInformerFactory, - jobInformerFactory) + var cm *controller.Manager + enabledControllers, err := getEnabledControllers(controllersEnabled) + checkError(err) + + // currently only supports running analysis controller independently + if enabledControllers[controllerAnalysis] { + log.Info("Running only analysis controller") + cm = controller.NewAnalysisManager( + namespace, + kubeClient, + argoprojClient, + jobInformerFactory.Batch().V1().Jobs(), + tolerantinformer.NewTolerantAnalysisRunInformer(dynamicInformerFactory), + tolerantinformer.NewTolerantAnalysisTemplateInformer(dynamicInformerFactory), + tolerantinformer.NewTolerantClusterAnalysisTemplateInformer(clusterDynamicInformerFactory), + resyncDuration, + metricsPort, + healthzPort, + 
k8sRequestProvider, + dynamicInformerFactory, + clusterDynamicInformerFactory, + namespaced, + kubeInformerFactory, + jobInformerFactory) + } else { + cm = controller.NewManager( + namespace, + kubeClient, + argoprojClient, + dynamicClient, + smiClient, + discoveryClient, + kubeInformerFactory.Apps().V1().ReplicaSets(), + kubeInformerFactory.Core().V1().Services(), + ingressWrapper, + jobInformerFactory.Batch().V1().Jobs(), + tolerantinformer.NewTolerantRolloutInformer(dynamicInformerFactory), + tolerantinformer.NewTolerantExperimentInformer(dynamicInformerFactory), + tolerantinformer.NewTolerantAnalysisRunInformer(dynamicInformerFactory), + tolerantinformer.NewTolerantAnalysisTemplateInformer(dynamicInformerFactory), + tolerantinformer.NewTolerantClusterAnalysisTemplateInformer(clusterDynamicInformerFactory), + istioPrimaryDynamicClient, + istioDynamicInformerFactory.ForResource(istioutil.GetIstioVirtualServiceGVR()).Informer(), + istioDynamicInformerFactory.ForResource(istioutil.GetIstioDestinationRuleGVR()).Informer(), + notificationConfigMapInformerFactory, + notificationSecretInformerFactory, + resyncDuration, + instanceID, + metricsPort, + healthzPort, + k8sRequestProvider, + nginxIngressClasses, + albIngressClasses, + dynamicInformerFactory, + clusterDynamicInformerFactory, + istioDynamicInformerFactory, + namespaced, + kubeInformerFactory, + jobInformerFactory) + } if err = cm.Run(ctx, rolloutThreads, serviceThreads, ingressThreads, experimentThreads, analysisThreads, electOpts); err != nil { log.Fatalf("Error running controller: %s", err.Error()) } @@ -249,6 +294,8 @@ func newCommand() *cobra.Command { command.Flags().StringVar(&istioVersion, "istio-api-version", defaults.DefaultIstioVersion, "Set the default Istio apiVersion that controller should look when manipulating VirtualServices.") command.Flags().StringVar(&ambassadorVersion, "ambassador-api-version", defaults.DefaultAmbassadorVersion, "Set the Ambassador apiVersion that controller should look 
when manipulating Ambassador Mappings.") command.Flags().StringVar(&trafficSplitVersion, "traffic-split-api-version", defaults.DefaultSMITrafficSplitVersion, "Set the default TrafficSplit apiVersion that controller uses when creating TrafficSplits.") + command.Flags().StringVar(&traefikAPIGroup, "traefik-api-group", defaults.DefaultTraefikAPIGroup, "Set the default Traefik apiGroup that controller uses.") + command.Flags().StringVar(&traefikVersion, "traefik-api-version", defaults.DefaultTraefikVersion, "Set the default Traefik apiVersion that controller uses.") command.Flags().StringVar(&ingressVersion, "ingress-api-version", "", "Set the Ingress apiVersion that the controller should use.") command.Flags().StringVar(&appmeshCRDVersion, "appmesh-crd-version", defaults.DefaultAppMeshCRDVersion, "Set the default AppMesh CRD Version that controller uses when manipulating resources.") command.Flags().StringArrayVar(&albIngressClasses, "alb-ingress-classes", defaultALBIngressClass, "Defines all the ingress class annotations that the alb ingress controller operates on. Defaults to alb") @@ -262,6 +309,7 @@ func newCommand() *cobra.Command { command.Flags().DurationVar(&electOpts.LeaderElectionRenewDeadline, "leader-election-renew-deadline", controller.DefaultLeaderElectionRenewDeadline, "The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.") command.Flags().DurationVar(&electOpts.LeaderElectionRetryPeriod, "leader-election-retry-period", controller.DefaultLeaderElectionRetryPeriod, "The duration the clients should wait between attempting acquisition and renewal of a leadership. 
This is only applicable if leader election is enabled.") command.Flags().BoolVar(&selfServiceNotificationEnabled, "self-service-notification-enabled", false, "Allows rollouts controller to pull notification config from the namespace that the rollout resource is in. This is useful for self-service notification.") + command.Flags().StringSliceVar(&controllersEnabled, "controllers", nil, "Explicitly specify the list of controllers to run, currently only supports 'analysis', eg. --controllers=analysis. Default: all controllers are enabled") return &command } @@ -315,3 +363,15 @@ func checkError(err error) { log.Fatal(err) } } + +func getEnabledControllers(controllersEnabled []string) (map[string]bool, error) { + enabledControllers := make(map[string]bool) + for _, controller := range controllersEnabled { + if supportedControllers[controller] { + enabledControllers[controller] = true + } else { + return nil, fmt.Errorf("unsupported controller: %s", controller) + } + } + return enabledControllers, nil +} diff --git a/controller/controller.go b/controller/controller.go index afe4bc769b..229df6a5f6 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -163,6 +163,87 @@ type Manager struct { notificationSecretInformerFactory kubeinformers.SharedInformerFactory jobInformerFactory kubeinformers.SharedInformerFactory istioPrimaryDynamicClient dynamic.Interface + + onlyAnalysisMode bool +} + +func NewAnalysisManager( + namespace string, + kubeclientset kubernetes.Interface, + argoprojclientset clientset.Interface, + jobInformer batchinformers.JobInformer, + analysisRunInformer informers.AnalysisRunInformer, + analysisTemplateInformer informers.AnalysisTemplateInformer, + clusterAnalysisTemplateInformer informers.ClusterAnalysisTemplateInformer, + resyncPeriod time.Duration, + metricsPort int, + healthzPort int, + k8sRequestProvider *metrics.K8sRequestsCountProvider, + dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, + 
clusterDynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, + namespaced bool, + kubeInformerFactory kubeinformers.SharedInformerFactory, + jobInformerFactory kubeinformers.SharedInformerFactory, +) *Manager { + runtime.Must(rolloutscheme.AddToScheme(scheme.Scheme)) + log.Info("Creating event broadcaster") + + metricsAddr := fmt.Sprintf(listenAddr, metricsPort) + metricsServer := metrics.NewMetricsServer(metrics.ServerConfig{ + Addr: metricsAddr, + RolloutLister: nil, + AnalysisRunLister: analysisRunInformer.Lister(), + AnalysisTemplateLister: analysisTemplateInformer.Lister(), + ClusterAnalysisTemplateLister: clusterAnalysisTemplateInformer.Lister(), + ExperimentLister: nil, + K8SRequestProvider: k8sRequestProvider, + }) + + healthzServer := NewHealthzServer(fmt.Sprintf(listenAddr, healthzPort)) + analysisRunWorkqueue := workqueue.NewNamedRateLimitingQueue(queue.DefaultArgoRolloutsRateLimiter(), "AnalysisRuns") + recorder := record.NewEventRecorder(kubeclientset, metrics.MetricRolloutEventsTotal, metrics.MetricNotificationFailedTotal, metrics.MetricNotificationSuccessTotal, metrics.MetricNotificationSend, nil) + analysisController := analysis.NewController(analysis.ControllerConfig{ + KubeClientSet: kubeclientset, + ArgoProjClientset: argoprojclientset, + AnalysisRunInformer: analysisRunInformer, + JobInformer: jobInformer, + ResyncPeriod: resyncPeriod, + AnalysisRunWorkQueue: analysisRunWorkqueue, + MetricsServer: metricsServer, + Recorder: recorder, + }) + + cm := &Manager{ + wg: &sync.WaitGroup{}, + metricsServer: metricsServer, + healthzServer: healthzServer, + jobSynced: jobInformer.Informer().HasSynced, + analysisRunSynced: analysisRunInformer.Informer().HasSynced, + analysisTemplateSynced: analysisTemplateInformer.Informer().HasSynced, + clusterAnalysisTemplateSynced: clusterAnalysisTemplateInformer.Informer().HasSynced, + analysisRunWorkqueue: analysisRunWorkqueue, + analysisController: analysisController, + namespace: namespace, + 
kubeClientSet: kubeclientset, + dynamicInformerFactory: dynamicInformerFactory, + clusterDynamicInformerFactory: clusterDynamicInformerFactory, + namespaced: namespaced, + kubeInformerFactory: kubeInformerFactory, + jobInformerFactory: jobInformerFactory, + onlyAnalysisMode: true, + } + + _, err := rolloutsConfig.InitializeConfig(kubeclientset, defaults.DefaultRolloutsConfigMapName) + if err != nil { + log.Fatalf("Failed to init config: %v", err) + } + + err = plugin.DownloadPlugins(plugin.FileDownloaderImpl{}) + if err != nil { + log.Fatalf("Failed to download plugins: %v", err) + } + + return cm } // NewManager returns a new manager to manage all the controllers @@ -223,7 +304,7 @@ func NewManager( ingressWorkqueue := workqueue.NewNamedRateLimitingQueue(queue.DefaultArgoRolloutsRateLimiter(), "Ingresses") refResolver := rollout.NewInformerBasedWorkloadRefResolver(namespace, dynamicclientset, discoveryClient, argoprojclientset, rolloutsInformer.Informer()) - apiFactory := notificationapi.NewFactory(record.NewAPIFactorySettings(), defaults.Namespace(), notificationSecretInformerFactory.Core().V1().Secrets().Informer(), notificationConfigMapInformerFactory.Core().V1().ConfigMaps().Informer()) + apiFactory := notificationapi.NewFactory(record.NewAPIFactorySettings(analysisRunInformer), defaults.Namespace(), notificationSecretInformerFactory.Core().V1().Secrets().Informer(), notificationConfigMapInformerFactory.Core().V1().ConfigMaps().Informer()) recorder := record.NewEventRecorder(kubeclientset, metrics.MetricRolloutEventsTotal, metrics.MetricNotificationFailedTotal, metrics.MetricNotificationSuccessTotal, metrics.MetricNotificationSend, apiFactory) notificationsController := notificationcontroller.NewControllerWithNamespaceSupport(dynamicclientset.Resource(v1alpha1.RolloutGVR), rolloutsInformer.Informer(), apiFactory, notificationcontroller.WithToUnstructured(func(obj metav1.Object) (*unstructured.Unstructured, error) { @@ -441,11 +522,13 @@ func (c *Manager) 
Run(ctx context.Context, rolloutThreadiness, serviceThreadines log.Info("Shutting down workers") goPlugin.CleanupClients() - c.serviceWorkqueue.ShutDownWithDrain() - c.ingressWorkqueue.ShutDownWithDrain() - c.rolloutWorkqueue.ShutDownWithDrain() - c.experimentWorkqueue.ShutDownWithDrain() - c.analysisRunWorkqueue.ShutDownWithDrain() + if !c.onlyAnalysisMode { + c.serviceWorkqueue.ShutDownWithDrain() + c.ingressWorkqueue.ShutDownWithDrain() + c.rolloutWorkqueue.ShutDownWithDrain() + c.experimentWorkqueue.ShutDownWithDrain() + } + c.analysisRunWorkqueue.ShutDownWithDrain() ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second) // give max of 10 seconds for http servers to shut down @@ -479,29 +562,50 @@ func (c *Manager) startLeading(ctx context.Context, rolloutThreadiness, serviceT c.jobInformerFactory.Start(ctx.Done()) - // Check if Istio installed on cluster before starting dynamicInformerFactory - if istioutil.DoesIstioExist(c.istioPrimaryDynamicClient, c.namespace) { - c.istioDynamicInformerFactory.Start(ctx.Done()) - } + if c.onlyAnalysisMode { + log.Info("Waiting for controller's informer caches to sync") + if ok := cache.WaitForCacheSync(ctx.Done(), c.analysisRunSynced, c.analysisTemplateSynced, c.jobSynced); !ok { + log.Fatalf("failed to wait for caches to sync, exiting") + } + // only wait for cluster scoped informers to sync if we are running in cluster-wide mode + if c.namespace == metav1.NamespaceAll { + if ok := cache.WaitForCacheSync(ctx.Done(), c.clusterAnalysisTemplateSynced); !ok { + log.Fatalf("failed to wait for cluster-scoped caches to sync, exiting") + } + } + go wait.Until(func() { c.wg.Add(1); c.analysisController.Run(ctx, analysisThreadiness); c.wg.Done() }, time.Second, ctx.Done()) + } else { - // Wait for the caches to be synced before starting workers - log.Info("Waiting for controller's informer caches to sync") - if ok := cache.WaitForCacheSync(ctx.Done(), c.serviceSynced, c.ingressSynced, c.jobSynced, c.rolloutSynced, 
c.experimentSynced, c.analysisRunSynced, c.analysisTemplateSynced, c.replicasSetSynced, c.configMapSynced, c.secretSynced); !ok { - log.Fatalf("failed to wait for caches to sync, exiting") - } - // only wait for cluster scoped informers to sync if we are running in cluster-wide mode - if c.namespace == metav1.NamespaceAll { - if ok := cache.WaitForCacheSync(ctx.Done(), c.clusterAnalysisTemplateSynced); !ok { - log.Fatalf("failed to wait for cluster-scoped caches to sync, exiting") + c.notificationConfigMapInformerFactory.Start(ctx.Done()) + c.notificationSecretInformerFactory.Start(ctx.Done()) + if ok := cache.WaitForCacheSync(ctx.Done(), c.configMapSynced, c.secretSynced); !ok { + log.Fatalf("failed to wait for configmap/secret caches to sync, exiting") } - } - go wait.Until(func() { c.wg.Add(1); c.rolloutController.Run(ctx, rolloutThreadiness); c.wg.Done() }, time.Second, ctx.Done()) - go wait.Until(func() { c.wg.Add(1); c.serviceController.Run(ctx, serviceThreadiness); c.wg.Done() }, time.Second, ctx.Done()) - go wait.Until(func() { c.wg.Add(1); c.ingressController.Run(ctx, ingressThreadiness); c.wg.Done() }, time.Second, ctx.Done()) - go wait.Until(func() { c.wg.Add(1); c.experimentController.Run(ctx, experimentThreadiness); c.wg.Done() }, time.Second, ctx.Done()) - go wait.Until(func() { c.wg.Add(1); c.analysisController.Run(ctx, analysisThreadiness); c.wg.Done() }, time.Second, ctx.Done()) - go wait.Until(func() { c.wg.Add(1); c.notificationsController.Run(rolloutThreadiness, ctx.Done()); c.wg.Done() }, time.Second, ctx.Done()) + // Check if Istio installed on cluster before starting dynamicInformerFactory + if istioutil.DoesIstioExist(c.istioPrimaryDynamicClient, c.namespace) { + c.istioDynamicInformerFactory.Start(ctx.Done()) + } + // Wait for the caches to be synced before starting workers + log.Info("Waiting for controller's informer caches to sync") + if ok := cache.WaitForCacheSync(ctx.Done(), c.serviceSynced, c.ingressSynced, c.jobSynced, 
c.rolloutSynced, c.experimentSynced, c.analysisRunSynced, c.analysisTemplateSynced, c.replicasSetSynced, c.configMapSynced, c.secretSynced); !ok { + log.Fatalf("failed to wait for caches to sync, exiting") + } + // only wait for cluster scoped informers to sync if we are running in cluster-wide mode + if c.namespace == metav1.NamespaceAll { + if ok := cache.WaitForCacheSync(ctx.Done(), c.clusterAnalysisTemplateSynced); !ok { + log.Fatalf("failed to wait for cluster-scoped caches to sync, exiting") + } + } + + go wait.Until(func() { c.wg.Add(1); c.rolloutController.Run(ctx, rolloutThreadiness); c.wg.Done() }, time.Second, ctx.Done()) + go wait.Until(func() { c.wg.Add(1); c.serviceController.Run(ctx, serviceThreadiness); c.wg.Done() }, time.Second, ctx.Done()) + go wait.Until(func() { c.wg.Add(1); c.ingressController.Run(ctx, ingressThreadiness); c.wg.Done() }, time.Second, ctx.Done()) + go wait.Until(func() { c.wg.Add(1); c.experimentController.Run(ctx, experimentThreadiness); c.wg.Done() }, time.Second, ctx.Done()) + go wait.Until(func() { c.wg.Add(1); c.analysisController.Run(ctx, analysisThreadiness); c.wg.Done() }, time.Second, ctx.Done()) + go wait.Until(func() { c.wg.Add(1); c.notificationsController.Run(rolloutThreadiness, ctx.Done()); c.wg.Done() }, time.Second, ctx.Done()) + + } log.Info("Started controller") } diff --git a/controller/controller_test.go b/controller/controller_test.go index f724ef0712..0f94131c96 100644 --- a/controller/controller_test.go +++ b/controller/controller_test.go @@ -205,7 +205,7 @@ func (f *fixture) newManager(t *testing.T) *Manager { Recorder: record.NewFakeEventRecorder(), }) - apiFactory := notificationapi.NewFactory(record.NewAPIFactorySettings(), "default", k8sI.Core().V1().Secrets().Informer(), k8sI.Core().V1().ConfigMaps().Informer()) + apiFactory := notificationapi.NewFactory(record.NewAPIFactorySettings(i.Argoproj().V1alpha1().AnalysisRuns()), "default", k8sI.Core().V1().Secrets().Informer(), 
k8sI.Core().V1().ConfigMaps().Informer()) // rolloutsInformer := rolloutinformers.NewRolloutInformer(f.client, "", time.Minute, cache.Indexers{}) cm.notificationsController = notificationcontroller.NewController(dynamicClient.Resource(v1alpha1.RolloutGVR), i.Argoproj().V1alpha1().Rollouts().Informer(), apiFactory, notificationcontroller.WithToUnstructured(func(obj metav1.Object) (*unstructured.Unstructured, error) { diff --git a/controller/metrics/metrics.go b/controller/metrics/metrics.go index bad18a1eb3..9691b73a40 100644 --- a/controller/metrics/metrics.go +++ b/controller/metrics/metrics.go @@ -55,9 +55,13 @@ func NewMetricsServer(cfg ServerConfig) *MetricsServer { reg := prometheus.NewRegistry() - reg.MustRegister(NewRolloutCollector(cfg.RolloutLister)) + if cfg.RolloutLister != nil { + reg.MustRegister(NewRolloutCollector(cfg.RolloutLister)) + } + if cfg.ExperimentLister != nil { + reg.MustRegister(NewExperimentCollector(cfg.ExperimentLister)) + } reg.MustRegister(NewAnalysisRunCollector(cfg.AnalysisRunLister, cfg.AnalysisTemplateLister, cfg.ClusterAnalysisTemplateLister)) - reg.MustRegister(NewExperimentCollector(cfg.ExperimentLister)) cfg.K8SRequestProvider.MustRegister(reg) reg.MustRegister(MetricRolloutReconcile) reg.MustRegister(MetricRolloutReconcileError) diff --git a/controller/metrics/metrics_test.go b/controller/metrics/metrics_test.go index 00700321fa..ced10b2442 100644 --- a/controller/metrics/metrics_test.go +++ b/controller/metrics/metrics_test.go @@ -54,7 +54,7 @@ func newFakeServerConfig(objs ...runtime.Object) ServerConfig { } } -func testHttpResponse(t *testing.T, handler http.Handler, expectedResponse string, testFunc func(t assert.TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) bool) { +func testHttpResponse(t *testing.T, handler http.Handler, expectedResponse string, testFunc func(t assert.TestingT, s any, contains any, msgAndArgs ...any) bool) { t.Helper() req, err := http.NewRequest("GET", "/metrics", nil) 
assert.NoError(t, err) diff --git a/controller/metrics/prommetrics.go b/controller/metrics/prommetrics.go index 3e55ba1ccb..c0d3b89f62 100644 --- a/controller/metrics/prommetrics.go +++ b/controller/metrics/prommetrics.go @@ -135,7 +135,7 @@ var ( MetricAnalysisTemplateInfo = prometheus.NewDesc( "analysis_template_info", "Information about analysis templates.", - append(namespaceNameLabels), + namespaceNameLabels, nil, ) diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index a37ad29fc0..f727834d8b 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -8,7 +8,7 @@ Install: * [docker](https://docs.docker.com/install/#supported-platforms) * [golang](https://golang.org/) * [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -* [kustomize](https://github.com/kubernetes-sigs/kustomize/releases) +* [kustomize](https://github.com/kubernetes-sigs/kustomize/releases) >= 4.5.5 * [k3d](https://k3d.io/) recommended Kustomize is required for unit tests (`make test` is using it), so you [must install it](https://kubectl.docs.kubernetes.io/installation/kustomize/) @@ -110,6 +110,29 @@ To run a subset of e2e tests, you need to specify the suite with `-run`, and the E2E_TEST_OPTIONS="-run 'TestCanarySuite' -testify.m 'TestCanaryScaleDownOnAbortNoTrafficRouting'" make test-e2e ``` +## Running the UI + +If you'd like to run the UI locally, you first need a running Rollouts controller. This can be a locally running controller with a k3d cluster, as described above, or a controller running in a remote Kubernetes cluster. 
+ +In order for the local React app to communicate with the controller and Kubernetes API, run the following to open a port forward to the dashboard: +```bash +kubectl argo rollouts dashboard +``` + +Note that you can also build the API server and run this instead, + +``` +make plugin +./dist/kubectl-argo-rollouts dashboard +``` + +In another terminal, run the following to start the UI: +```bash +cd ui +yarn install +yarn start +``` + ## Controller architecture Argo Rollouts is actually a collection of individual controllers @@ -178,6 +201,12 @@ make start-e2e E2E_INSTANCE_ID='' ``` +6. Working on CRDs? While editing them directly works when you are finding the shape of things you want, the final CRDs are autogenerated. Make sure to regenerate them before submitting PRs. They are controlled by the relevant annotations in the types file: + +eg: Analysis Templates are controlled by annotations in `pkg/apis/rollouts/v1alpha1/analysis_types.go`. + +Refer to https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html and https://book.kubebuilder.io/reference/markers/crd-validation.html for more info on annotations you can use. + ## Running Local Containers You may need to run containers locally, so here's how: @@ -218,6 +247,23 @@ pushed into the other kubernetes repositories yet. In order to import the kubern associated repos have to pinned to the correct version specified by the kubernetes/kubernetes release. The `./hack/update-k8s-dependencies.sh` updates all the dependencies to the those correct versions. +## Upgrading Notifications Engine +Argo Rollouts has a dependency on the [argoproj/notifications-engines](https://github.com/argoproj/notifications-engine) repo +for the notifications functionality and related documentation. 
+ +This is updated by upgrading the Go library in `go.mod` by running the commands: + +```shell +go get github.com/argoproj/notifications-engine@LATEST_COMMIT_HASH +go mod tidy +``` + +Next the latest notifications documentation can be imported by running: + +```shell +make docs +``` + ## Documentation Changes Modify contents in `docs/` directory. diff --git a/docs/FAQ.md b/docs/FAQ.md index 8a0c921cc7..861e4b3a80 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -41,6 +41,10 @@ solution that does not follow the GitOps approach. Yes. A k8s cluster can run multiple replicas of Argo-rollouts controllers to achieve HA. To enable this feature, run the controller with `--leader-elect` flag and increase the number of replicas in the controller's deployment manifest. The implementation is based on the [k8s client-go's leaderelection package](https://pkg.go.dev/k8s.io/client-go/tools/leaderelection#section-documentation). This implementation is tolerant to *arbitrary clock skew* among replicas. The level of tolerance to skew rate can be configured by setting `--leader-election-lease-duration` and `--leader-election-renew-deadline` appropriately. Please refer to the [package documentation](https://pkg.go.dev/k8s.io/client-go/tools/leaderelection#pkg-overview) for details. +### Can we install Argo Rollouts centrally in a cluster and manage Rollout resources in external clusters? + +No you cannot do that (even though Argo CD can work that way). This is by design because the Rollout is a custom resource unknown to vanilla Kubernetes. You need the Rollout CRD as well as the controller in the deployment cluster (every cluster that will use workloads with Rollouts). + ## Rollouts ### Which deployment strategies does Argo Rollouts support? diff --git a/docs/analysis/datadog.md b/docs/analysis/datadog.md index 3010c9a4df..7bca70be10 100644 --- a/docs/analysis/datadog.md +++ b/docs/analysis/datadog.md @@ -1,8 +1,5 @@ # Datadog Metrics -!!! 
important - Available since v0.10.0 - A [Datadog](https://www.datadoghq.com/) query can be used to obtain measurements for analysis. ```yaml @@ -26,10 +23,7 @@ spec: sum:requests.error.rate{service:{{args.service-name}}} ``` -The field `apiVersion` refers to the API version of Datadog (v1 or v2). Default value is `v1` if this is omitted. - -!!! note - Datadog is moving away from the legacy v1 API. Rate limits imposed by Datadog are therefore stricter when using v1. It is recommended to switch to v2 soon. If you switch to v2, you will not be able to use formulas (operations between individual queries). +The field `apiVersion` refers to the API version of Datadog (v1 or v2). Default value is `v1` if this is omitted. See "Working with Datadog API v2" below for more information. Datadog api and app tokens can be configured in a kubernetes secret in argo-rollouts namespace. @@ -46,3 +40,162 @@ stringData: ``` `apiVersion` here is different from the `apiVersion` from the Datadog configuration above. + +### Working with Datadog API v2 + +!!! important + While some basic v2 functionality is working in earlier versions, the new properties of `formula` and `queries` are only available as of v1.7 + +#### Moving to v2 + +If your old v1 was just a simple metric query - no formula as part of the query - then you can just move to v2 by updating the `apiVersion` in your existing Analysis Template, and everything should work. + +If you have a formula, you will need to update how you configure your metric. 
Here is a before/after example of what your Analysis Template should look like: + +Before: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: log-error-rate +spec: + args: + - name: service-name + metrics: + - name: error-rate + interval: 30s + successCondition: default(result, 0) < 10 + failureLimit: 3 + provider: + datadog: + apiVersion: v1 + interval: 5m + query: "moving_rollup(sum:requests.errors{service:{{args.service-name}}}.as_count(), 60, 'sum') / sum:requests{service:{{args.service-name}}}.as_count()" +``` + +After: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: log-error-rate +spec: + args: + - name: service-name + metrics: + - name: error-rate + # Polling rate against the Datadog API + interval: 30s + successCondition: default(result, 0) < 10 + failureLimit: 3 + provider: + datadog: + apiVersion: v2 + # The window of time we are looking at in DD. Basically we will fetch data from (now-5m) to now. + interval: 5m + queries: + a: sum:requests.errors{service:{{args.service-name}}}.as_count() + b: sum:requests{service:{{args.service-name}}}.as_count() + formula: "moving_rollup(a, 60, 'sum') / b" +``` + +#### Examples + +Simple v2 query with no formula + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: canary-container-restarts +spec: + args: + # This is set in rollout using the valueFrom: podTemplateHashValue functionality + - name: canary-hash + - name: service-name + - name: restarts.initial-delay + value: "60s" + - name: restarts.max-restarts + value: "4" + metrics: + - name: kubernetes.containers.restarts + initialDelay: "{{ args.restarts.initial-delay }}" + interval: 15s + failureCondition: default(result, 0) > {{ args.restarts.max-restarts }} + failureLimit: 0 + provider: + datadog: + apiVersion: v2 + interval: 5m + queries: + # The key is arbitrary - you will use this key to refer to the query if you use a formula. 
+ q: "max:kubernetes.containers.restarts{service-name:{{args.service-name}},rollouts_pod_template_hash:{{args.canary-hash}}}" +``` + +### Tips + +#### Datadog Results + +Datadog queries can return empty results if the query takes place during a time interval with no metrics. The Datadog provider will return a `nil` value yielding an error during the evaluation phase like: + +``` +invalid operation: < (mismatched types and float64) +``` + +However, empty query results yielding a `nil` value can be handled using the `default()` function. Here is a succeeding example using the `default()` function: + +```yaml +successCondition: default(result, 0) < 0.05 +``` + +#### Metric aggregation (v2 only) + +By default, Datadog analysis run is configured to use `last` metric aggregator when querying Datadog v2 API. This value can be overriden by specifying a new `aggregator` value from a list of supported aggregators (`avg,min,max,sum,last,percentile,mean,l2norm,area`) for the V2 API ([docs](https://docs.datadoghq.com/api/latest/metrics/#query-scalar-data-across-multiple-products)). + +For example, using count-based distribution metric (`count:metric{*}.as_count()`) with values `1,9,3,7,5` in a given `interval` will make `last` aggregator return `5`. To return a sum of all values (`25`), set `aggregator: sum` in Datadog provider block and use `moving_rollup()` function to aggregate values in the specified rollup interval. These functions can be combined in a `formula` to perform additional calculations: + +```yaml +... 
+ metrics: + - name: error-percentage + interval: 30s + successCondition: default(result, 0) < 5 + failureLimit: 3 + provider: + datadog: + apiVersion: v2 + interval: 5m + aggregator: sum # override default aggregator + queries: + a: count:requests.errors{service:my-service}.as_count() + b: count:requests{service:my-service}.as_count() + formula: "moving_rollup(a, 300, 'sum') / moving_rollup(b, 300, 'sum') * 100" # percentage of requests with errors +``` + +#### Templates and Helm + +Helm and Argo Rollouts both try to parse things between `{{ ... }}` when rendering templates. If you use Helm to deliver your manifests, you will need to escape `{{ args.whatever }}`. Using the example above, here it is set up for Helm: + +```yaml +... +metrics: + - name: kubernetes.containers.restarts + initialDelay: "{{ `{{ args.restarts.initial-delay }}` }}" + interval: 15s + failureCondition: default(result, 0) > {{ `{{ args.restarts.max-restarts }}` }} + failureLimit: 0 + provider: + datadog: + apiVersion: v2 + interval: 5m + queries: + q: "{{ `max:kubernetes.containers.restarts{kube_app_name:{{args.kube_app_name}},rollouts_pod_template_hash:{{args.canary-hash}}}` }}" +``` + +#### Rate Limits + +For the `v1` API, you ask for an increase on the `api/v1/query` route. + +For the `v2` API, the Ratelimit-Name you ask for an increase in is the `query_scalar_public`. diff --git a/docs/analysis/prometheus.md b/docs/analysis/prometheus.md index 786b3d2c6f..cb93bf0e5d 100644 --- a/docs/analysis/prometheus.md +++ b/docs/analysis/prometheus.md @@ -23,7 +23,7 @@ spec: # timeout is expressed in seconds timeout: 40 headers: - - name: X-Scope-Org-ID + - key: X-Scope-OrgID value: tenant_a query: | sum(irate( @@ -39,7 +39,9 @@ you validate your [PromQL expression](https://prometheus.io/docs/prometheus/late See the [Analysis Overview page](../../features/analysis) for more details on the available options. 
-## Utilizing Amazon Managed Prometheus +## Authorization + +### Utilizing Amazon Managed Prometheus Amazon Managed Prometheus can be used as the prometheus data source for analysis. In order to do this the namespace where your analysis is running will have to have the appropriate [IRSA attached](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-ingest-metrics-new-Prometheus.html#AMP-onboard-new-Prometheus-IRSA) to allow for prometheus queries. Once you ensure the proper permissions are in place to access AMP, you can use an AMP workspace url in your ```provider``` block and add a SigV4 config for Sigv4 signing: @@ -61,6 +63,55 @@ provider: roleArn: $ROLEARN ``` +### With OAuth2 + +You can setup an [OAuth2 client credential](https://datatracker.ietf.org/doc/html/rfc6749#section-4.4) flow using the following values: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: success-rate +spec: + args: + - name: service-name + # from secret + - name: oauthSecret # This is the OAuth2 shared secret + valueFrom: + secretKeyRef: + name: oauth-secret + key: secret + metrics: + - name: success-rate + interval: 5m + # NOTE: prometheus queries return results in the form of a vector. 
+ # So it is common to access the index 0 of the returned array to obtain the value + successCondition: result[0] >= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + # timeout is expressed in seconds + timeout: 40 + authentication: + oauth2: + tokenUrl: https://my-oauth2-provider/token + clientId: my-client-id + clientSecret: "{{ args.oauthSecret }}" + scopes: [ + "my-oauth2-scope" + ] + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) +``` + +The AnalysisRun will first get an access token using that information, and provide it as an `Authorization: Bearer` header for the metric provider call. + ## Additional Metadata Any additional metadata from the Prometheus controller, like the resolved queries after substituting the template's diff --git a/docs/analysis/web.md b/docs/analysis/web.md index db501d0ead..d6ba5abf35 100644 --- a/docs/analysis/web.md +++ b/docs/analysis/web.md @@ -49,7 +49,7 @@ NOTE: if the result is a string, two convenience functions `asInt` and `asFloat` to convert a result value to a numeric type so that mathematical comparison operators can be used (e.g. >, <, >=, <=). -### Optional web methods +## Optional web methods It is possible to use a POST or PUT requests, by specifying the `method` and either `body` or `jsonBody` fields ```yaml @@ -96,7 +96,7 @@ It is possible to use a POST or PUT requests, by specifying the `method` and eit jsonPath: "{$.data.ok}" ``` -### Skip TLS verification +## Skip TLS verification You can skip the TLS verification of the web host provided by setting the options `insecure: true`. 
@@ -112,4 +112,47 @@ You can skip the TLS verification of the web host provided by setting the option - key: Authorization value: "Bearer {{ args.api-token }}" jsonPath: "{$.data}" -``` \ No newline at end of file +``` +## Authorization + +### With OAuth2 + +You can setup an [OAuth2 client credential](https://datatracker.ietf.org/doc/html/rfc6749#section-4.4) flow using the following values: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: success-rate +spec: + args: + - name: service-name + # from secret + - name: oauthSecret # This is the OAuth2 shared secret + valueFrom: + secretKeyRef: + name: oauth-secret + key: secret + metrics: + - name: webmetric + successCondition: result == true + provider: + web: + url: "http://my-server.com/api/v1/measurement?service={{ args.service-name }}" + timeoutSeconds: 20 # defaults to 10 seconds + authentication: + oauth2: + tokenUrl: https://my-oauth2-provider/token + clientId: my-client-id + clientSecret: "{{ args.oauthSecret }}" + scopes: [ + "my-oauth2-scope" + ] + headers: + - key: Content-Type # if body is a json, it is recommended to set the Content-Type + value: "application/json" + jsonPath: "{$.data.ok}" +``` + +In that case, there is no need to explicitly provide the `Authorization` header. +The AnalysisRun will first get an access token using that information, and provide it as an `Authorization: Bearer` header for the metric provider call.
diff --git a/docs/assets/versions.js b/docs/assets/versions.js index 1336443d1a..fe0aeb1d3c 100644 --- a/docs/assets/versions.js +++ b/docs/assets/versions.js @@ -1,15 +1,40 @@ -setTimeout(function() { - const callbackName = 'callback_' + new Date().getTime(); - window[callbackName] = function (response) { - const div = document.createElement('div'); - div.innerHTML = response.html; - document.querySelector(".md-header__inner > .md-header__title").appendChild(div); - const container = div.querySelector('.rst-versions'); - var caret = document.createElement('div'); - caret.innerHTML = "" - caret.classList.add('dropdown-caret') - div.querySelector('.rst-current-version').appendChild(caret); +const targetNode = document.querySelector('.md-header__inner'); +const observerOptions = { + childList: true, + subtree: true +}; + +const observerCallback = function(mutationsList, observer) { + for (let mutation of mutationsList) { + if (mutation.type === 'childList') { + const titleElement = document.querySelector('.md-header__inner > .md-header__title'); + if (titleElement) { + initializeVersionDropdown(); + observer.disconnect(); + } + } } +}; + +const observer = new MutationObserver(observerCallback); +observer.observe(targetNode, observerOptions); + +function initializeVersionDropdown() { + const callbackName = 'callback_' + new Date().getTime(); + window[callbackName] = function(response) { + const div = document.createElement('div'); + div.innerHTML = response.html; + document.querySelector(".md-header__inner > .md-header__title").appendChild(div); + const container = div.querySelector('.rst-versions'); + var caret = document.createElement('div'); + caret.innerHTML = ""; + caret.classList.add('dropdown-caret'); + div.querySelector('.rst-current-version').appendChild(caret); + + div.querySelector('.rst-current-version').addEventListener('click', function() { + container.classList.toggle('shift-up'); + }); + }; var CSSLink = document.createElement('link'); 
CSSLink.rel='stylesheet'; @@ -20,6 +45,31 @@ setTimeout(function() { script.src = 'https://argo-rollouts.readthedocs.io/_/api/v2/footer_html/?'+ 'callback=' + callbackName + '&project=argo-rollouts&page=&theme=mkdocs&format=jsonp&docroot=docs&source_suffix=.md&version=' + (window['READTHEDOCS_DATA'] || { version: 'latest' }).version; document.getElementsByTagName('head')[0].appendChild(script); -}, 0); - +} +// VERSION WARNINGS +window.addEventListener("DOMContentLoaded", function() { + var currentVersion = window.location.href.match(/\/en\/(release-(?:v\d+|\w+)|latest|stable)\//); + var margin = 30; + var headerHeight = document.getElementsByClassName("md-header")[0].offsetHeight; + if (currentVersion && currentVersion.length > 1) { + currentVersion = currentVersion[1]; + if (currentVersion === "latest") { + document.querySelector("div[data-md-component=announce]").innerHTML = "
You are viewing the docs for an unreleased version of Argo Rollouts, click here to go to the latest stable version.
"; + var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin; + document.querySelector("header.md-header").style.top = bannerHeight + "px"; + document.querySelector('style').textContent += + "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}"; + document.querySelector('style').textContent += + "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}"; + } else if (currentVersion !== "stable") { + document.querySelector("div[data-md-component=announce]").innerHTML = "
You are viewing the docs for a previous version of Argo Rollouts, click here to go to the latest stable version.
"; + var bannerHeight = document.getElementById('announce-msg').offsetHeight + margin; + document.querySelector("header.md-header").style.top = bannerHeight + "px"; + document.querySelector('style').textContent += + "@media screen and (min-width: 76.25em){ .md-sidebar { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}"; + document.querySelector('style').textContent += + "@media screen and (min-width: 60em){ .md-sidebar--secondary { height: 0; top:" + (bannerHeight + headerHeight) + "px !important; }}"; + } + } +}); \ No newline at end of file diff --git a/docs/best-practices.md b/docs/best-practices.md index ffbb4ff411..2f7908f4ba 100644 --- a/docs/best-practices.md +++ b/docs/best-practices.md @@ -1,6 +1,71 @@ # Best Practices -This document describes some best practices, tips and tricks when using Argo Rollouts. +This document describes some best practices, tips and tricks when using Argo Rollouts. Be sure to read the [FAQ page](../FAQ) as well. + + +## Check application compatibility + +Argo Rollouts is a great solution for applications that your team is deploying in a continuous manner (and you have access to the source code). Before using Argo Rollouts you need to contact the developers of the application and verify that you can indeed run multiple versions of the same application at the same time. + +Not all applications can work with Argo Rollouts. Applications that use shared resources (e.g. writing to a shared file) will have issues, and "worker" type applications (that load data from queues) will rarely work ok without source code modifications. + +Note that using Argo Rollouts for "infrastructure" applications such as cert-manager, nginx, coredns, sealed-secrets etc is **NOT** recommended. + +## Understand the scope of Argo Rollouts + +Currently Argo Rollouts works with a single Kubernetes deployment/application and within a single cluster only. 
You also need to have the controller deployed on *every* cluster where a Rollout is running if you have more than one cluster using Rollout workloads. + +If you want to look at multiple services on multiple clusters +see discussion at issues [2737](https://github.com/argoproj/argo-rollouts/issues/2737), [451](https://github.com/argoproj/argo-rollouts/issues/451) and [2088](https://github.com/argoproj/argo-rollouts/issues/2088). + + +Note also that Argo Rollouts is a self-contained solution. It doesn't need Argo CD or any other Argo project to work. + +## Understand your use case + +Argo Rollouts is perfect for all progressive delivery scenarios as explained in [the concepts page](../concepts). + +You should *NOT* use Argo Rollouts for preview/ephemeral environments. For that use case check the [Argo CD Pull Request generator](https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Generators-Pull-Request/). + +The recommended way to use Argo Rollouts is for brief deployments that take 15-20 minutes or maximum 1-2 hours. If you want to run new versions for days or weeks before deciding to promote, then Argo Rollouts is probably not the best solution for you. + +Also, if you want to run a wave of multiple versions at the same time (i.e. have 1.1 and 1.2 and 1.3 running at the same time), know that Argo Rollouts was not designed for this scenario. + +A version that has just been promoted is assumed to be ready for production and has already passed all your tests (either manual or automated). + +## Prepare your metrics + +The end-goal for using Argo Rollouts is to have **fully automated** deployments that also include rollbacks when needed. + +While Argo Rollouts supports manual promotions and other manual pauses, these are best used for experimentation and test reasons. + +Ideally you should have proper metrics that tell you in 5-15 minutes if a deployment is successful or not.
If you don't have those metrics, then you will miss a lot of value from Argo Rollouts. + +Get your [metrics](../features/analysis) in place first and test them with dry-runs before applying them to production deployments. + + +## There is no "Argo Rollouts API" + +A lot of people want to find an official API for managing Rollouts. There isn't any separate Argo Rollouts API. You can always use the Kubernetes API and patching of resources if you want to control a rollout. + +But as explained in the previous point the end goal should be fully automated deployments without you having to tell Argo Rollouts to promote or abort. + +## Integrating with other systems and processes + +There are two main ways to integrate other systems with Argo Rollouts. + +The easiest way is to use [Notifications](../features/notifications). This means that when a rollout is finished/aborted you send a notification to another system that does other tasks that you want to happen. + +Alternatively you can control Rollouts with the CLI or by manually patching the Kubernetes resources. + + +## Use the Kubernetes Downward API + +If you want your applications to know if they are part of a canary or not, you can use [Ephemeral labels](../features/ephemeral-metadata) along with the [Kubernetes downward api](https://kubernetes.io/docs/concepts/workloads/pods/downward-api/). + +This means that your application will read its configuration from files in a dynamic manner and adapt according to the situation. + + ## Ingress desired/stable host routes @@ -19,7 +84,7 @@ to the ingress rules so that it is possible to specifically reach to the desired pods or stable pods.
```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: guestbook @@ -29,25 +94,36 @@ spec: - host: guestbook-desired.argoproj.io http: paths: - - backend: - serviceName: guestbook-desired - servicePort: 443 - path: /* + - path: / + pathType: Prefix + backend: + service: + name: guestbook-desired + port: + number: 443 + # host rule to only reach the stable pods - host: guestbook-stable.argoproj.io http: paths: - - backend: - serviceName: guestbook-stable - servicePort: 443 - path: /* + - path: / + pathType: Prefix + backend: + service: + name: guestbook-stable + port: + number: 443 + # default rule which omits host, and will split traffic between desired vs. stable - http: paths: - - backend: - serviceName: guestbook-root - servicePort: 443 - path: /* + - path: / + pathType: Prefix + backend: + service: + name: guestbook-root + port: + number: 443 ``` The above technique has the a benefit in that it would not incur additional cost of allocating @@ -56,8 +132,10 @@ additional load balancers. ## Reducing operator memory usage On clusters with thousands of rollouts memory usage for the argo-rollouts -operator can be reduced significantly by changing RevisionHistoryLimit from the -default of 10 to a lower number. One user of Argo Rollouts saw a 27% reduction +controller can be reduced significantly by changing the `RevisionHistoryLimit` property from the +default of 10 to a lower number. + +One user of Argo Rollouts saw a 27% reduction in memory usage for a cluster with 1290 rollouts by changing -RevisionHistoryLimit from 10 to 0. +`RevisionHistoryLimit` from 10 to 0. diff --git a/docs/features/analysis.md b/docs/features/analysis.md index 1a81a71274..4029164425 100644 --- a/docs/features/analysis.md +++ b/docs/features/analysis.md @@ -2,7 +2,7 @@ Argo Rollouts provides several ways to perform analysis to drive progressive delivery. 
This document describes how to achieve various forms of progressive delivery, varying the point in -time analysis is performed, it's frequency, and occurrence. +time analysis is performed, its frequency, and occurrence. ## Custom Resource Definitions @@ -358,6 +358,184 @@ templates together. The controller combines the `metrics` and `args` fields of a * Multiple metrics in the templates have the same name * Two arguments with the same name have different default values no matter the argument value in Rollout +## Analysis Template referencing other Analysis Templates + +AnalysisTemplates and ClusterAnalysisTemplates may reference other templates. + +They can be combined with other metrics: + +=== "AnalysisTemplate" + + ```yaml + apiVersion: argoproj.io/v1alpha1 + kind: AnalysisTemplate + metadata: + name: error-rate + spec: + args: + - name: service-name + metrics: + - name: error-rate + interval: 5m + successCondition: result[0] <= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code=~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + --- + apiVersion: argoproj.io/v1alpha1 + kind: AnalysisTemplate + metadata: + name: rates + spec: + args: + - name: service-name + metrics: + - name: success-rate + interval: 5m + successCondition: result[0] >= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + templates: + - templateName: error-rate + clusterScope: false + ``` + +Or without additional metrics: + +=== "AnalysisTemplate" + + ```yaml + 
apiVersion: argoproj.io/v1alpha1 + kind: AnalysisTemplate + metadata: + name: success-rate + spec: + args: + - name: service-name + metrics: + - name: success-rate + interval: 5m + successCondition: result[0] >= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + --- + apiVersion: argoproj.io/v1alpha1 + kind: AnalysisTemplate + metadata: + name: error-rate + spec: + args: + - name: service-name + metrics: + - name: error-rate + interval: 5m + successCondition: result[0] <= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code=~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + --- + apiVersion: argoproj.io/v1alpha1 + kind: AnalysisTemplate + metadata: + name: rates + spec: + args: + - name: service-name + templates: + - templateName: success-rate + clusterScope: false + - templateName: error-rate + clusterScope: false + ``` + + The result in the AnalysisRun will have the aggregation of metrics of each template: + + === "AnalysisRun" + + ```yaml + # NOTE: Generated AnalysisRun from a single template referencing several templates + apiVersion: argoproj.io/v1alpha1 + kind: AnalysisRun + metadata: + name: guestbook-CurrentPodHash-templates-in-template + spec: + args: + - name: service-name + value: guestbook-svc.default.svc.cluster.local + metrics: + - name: success-rate + interval: 5m + successCondition: result[0] >= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + 
istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + - name: error-rate + interval: 5m + successCondition: result[0] <= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code=~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + ``` + +!!! note + The same limitations as for the multiple templates feature apply. + The controller will error when merging the templates if: + + * Multiple metrics in the templates have the same name + * Two arguments with the same name have different default values no matter the argument value in Rollout + + However, if the same AnalysisTemplate is referenced several times along the chain of references, the controller will only keep it once and discard the other references. + ## Analysis Template Arguments AnalysisTemplates may declare a set of arguments that can be passed by Rollouts. The args can then be used as in metrics configuration and are resolved at the time the AnalysisRun is created. Argument placeholders are defined as @@ -854,6 +1032,24 @@ spec: limit: 20 ``` +## Time-to-live (TTL) Strategy + +!!! important + Available since v1.7 + +`ttlStrategy` limits the lifetime of an analysis run that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the run finishes, it will be deleted after the time to live expires. If this field is unset, the analysis controller will keep the completed runs, unless they are associated with rollouts using other garbage collection policies (e.g. `successfulRunHistoryLimit` and `unsuccessfulRunHistoryLimit`). 
+ +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +spec: + ... + ttlStrategy: + secondsAfterCompletion: 3600 + secondsAfterSuccess: 1800 + secondsAfterFailure: 1800 +``` + ## Inconclusive Runs Analysis runs can also be considered `Inconclusive`, which indicates the run was neither successful, diff --git a/docs/features/anti-affinity/anti-affinity.md b/docs/features/anti-affinity/anti-affinity.md index 3c46171d70..2a547c9c07 100644 --- a/docs/features/anti-affinity/anti-affinity.md +++ b/docs/features/anti-affinity/anti-affinity.md @@ -31,6 +31,7 @@ You can learn more about anti-affinity [here](https://kubernetes.io/docs/concept Repeating the above example with anti-affinity enabled, here is what happens when the `.spec.template` of the Rollout changes. Due to anti-affinity, the new pods cannot be scheduled on nodes which run the old ReplicaSet's pods. As a result, the cluster auto-scaler must create 2 nodes to host the new ReplicaSet's pods. In this case, pods won't be started since the scaled-down nodes are guaranteed to not have the new pods. + ![ Original Rollout is running, spread across two nodes](images/solution.png) ## Enabling Anti-Affinity in Rollouts diff --git a/docs/features/bluegreen.md b/docs/features/bluegreen.md index 1cded0d86a..cc3e819adf 100644 --- a/docs/features/bluegreen.md +++ b/docs/features/bluegreen.md @@ -75,7 +75,7 @@ The following describes the sequence of events that happen during a blue-green u 1. Beginning at a fully promoted, steady-state, a revision 1 ReplicaSet is pointed to by both the `activeService` and `previewService`. 1. A user initiates an update by modifying the pod template (`spec.template.spec`). 1. The revision 2 ReplicaSet is created with size 0. -1. The preview service is modified to point to the revision 2 ReplicaSet. The `activeService` remains pointing to revision 1. +1. The `previewService` is modified to point to the revision 2 ReplicaSet. The `activeService` remains pointing to revision 1. 1. 
The revision 2 ReplicaSet is scaled to either `spec.replicas` or `previewReplicaCount` if set. 1. Once revision 2 ReplicaSet Pods are fully available, `prePromotionAnalysis` begins. 1. Upon success of `prePromotionAnalysis`, the blue/green pauses if `autoPromotionEnabled` is false, or `autoPromotionSeconds` is non-zero. diff --git a/docs/features/canary.md b/docs/features/canary.md index 55269cbdc1..4e3dab50c8 100644 --- a/docs/features/canary.md +++ b/docs/features/canary.md @@ -1,5 +1,5 @@ # Canary Deployment Strategy -A canary rollout is a deployment strategy where the operator releases a new version of their application to a small percentage of the production traffic. +A canary rollout is a deployment strategy where the operator releases a new version of their application to a small percentage of the production traffic. ## Overview Since there is no agreed upon standard for a canary deployment, the rollouts controller allows users to outline how they want to run their canary deployment. Users can define a list of steps the controller uses to manipulate the ReplicaSets when there is a change to the `.spec.template`. Each step will be evaluated before the new ReplicaSet is promoted to the stable version, and the old version is completely scaled down. @@ -7,7 +7,7 @@ Since there is no agreed upon standard for a canary deployment, the rollouts con Each step can have one of two fields. The `setWeight` field dictates the percentage of traffic that should be sent to the canary, and the `pause` struct instructs the rollout to pause. When the controller reaches a `pause` step for a rollout, it will add a `PauseCondition` struct to the `.status.PauseConditions` field. If the `duration` field within the `pause` struct is set, the rollout will not progress to the next step until it has waited for the value of the `duration` field. Otherwise, the rollout will wait indefinitely until that Pause condition is removed. 
By using the `setWeight` and the `pause` fields, a user can declaratively describe how they want to progress to the new version. Below is an example of a canary strategy. !!! important - If the canary Rollout does not use [traffic management](traffic-management/index.md), the Rollout makes a best effort attempt to achieve the percentage listed in the last `setWeight` step between the new and old version. For example, if a Rollout has 10 Replicas and 10% for the first `setWeight` step, the controller will scale the new desired ReplicaSet to 1 replicas and the old stable ReplicaSet to 9. In the case where the setWeight is 15%, the Rollout attempts to get there by rounding up the calculation (i.e. the new ReplicaSet has 2 pods since 15% of 10 rounds up to 2 and the old ReplicaSet has 9 pods since 85% of 10 rounds up to 9). If a user wants to have more fine-grained control of the percentages without a large number of Replicas, that user should use the [traffic management](#trafficrouting) functionality. + If the canary Rollout does not use [traffic management](traffic-management/index.md), the Rollout makes a best effort attempt to achieve the percentage listed in the last `setWeight` step between the new and old version. For example, if a Rollout has 10 Replicas and 10% for the first `setWeight` step, the controller will scale the new desired ReplicaSet to 1 replicas and the old stable ReplicaSet to 9. In the case where the setWeight is 41%, the Rollout attempts to get there by finding the whole number with the smallest delta, rounding up the calculation if the deltas are equal (i.e. the new ReplicaSet has 4 pods since 41% of 10 is closer to 4/10 than 5/10, and the old ReplicaSet has 6 pods). If a user wants to have more fine-grained control of the percentages without a large number of Replicas, that user should use the [traffic management](#trafficrouting) functionality.
## Example ```yaml @@ -59,7 +59,7 @@ spec: - pause: {} # pause indefinitely ``` -If no `duration` is specified for a pause step, the rollout will be paused indefinitely. To unpause, use the [argo kubectl plugin](kubectl-plugin.md) `promote` command. +If no `duration` is specified for a pause step, the rollout will be paused indefinitely. To unpause, use the [argo kubectl plugin](kubectl-plugin.md) `promote` command. ```shell # promote to the next step diff --git a/docs/features/kustomize.md b/docs/features/kustomize.md index 4efa4144a0..e1a691b511 100644 --- a/docs/features/kustomize.md +++ b/docs/features/kustomize.md @@ -4,7 +4,7 @@ Kustomize can be extended to understand CRD objects through the use of [transformer configs](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs). Using transformer configs, kustomize can be "taught" about the structure of a Rollout object and leverage kustomize features such as ConfigMap/Secret generators, variable references, and common -labels & annotations. To use Rollouts with kustomize: +labels & annotations. To use Rollouts with kustomize: 1. Download [`rollout-transform.yaml`](kustomize/rollout-transform.yaml) into your kustomize directory. @@ -65,18 +65,18 @@ resources: openapi: path: https://raw.githubusercontent.com/argoproj/argo-schema-generator/main/schema/argo_all_k8s_kustomize_schema.json -patchesStrategicMerge: -- |- - apiVersion: argoproj.io/v1alpha1 - kind: Rollout - metadata: - name: rollout-canary - spec: - template: - spec: - containers: - - name: rollouts-demo - image: nginx +patches: +- patch: |- + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + metadata: + name: rollout-canary + spec: + template: + spec: + containers: + - name: rollouts-demo + image: nginx ``` The OpenAPI data is auto-generated and defined in this [file](https://github.com/argoproj/argo-schema-generator/blob/main/schema/argo_all_k8s_kustomize_schema.json). 
diff --git a/docs/features/kustomize/rollout_cr_schema.json b/docs/features/kustomize/rollout_cr_schema.json index 6d8420e916..b7ca35204f 100644 --- a/docs/features/kustomize/rollout_cr_schema.json +++ b/docs/features/kustomize/rollout_cr_schema.json @@ -243,19 +243,45 @@ }, "datadog": { "properties": { + "aggregator": { + "enum": [ + "avg", + "min", + "max", + "sum", + "last", + "percentile", + "mean", + "l2norm", + "area" + ], + "type": "string" + }, "apiVersion": { + "default": "v1", + "enum": [ + "v1", + "v2" + ], + "type": "string" + }, + "formula": { "type": "string" }, "interval": { + "default": "5m", "type": "string" }, + "queries": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, "query": { "type": "string" } }, - "required": [ - "query" - ], "type": "object" }, "graphite": { @@ -309,6 +335,10 @@ "format": "int32", "type": "integer" }, + "backoffLimitPerIndex": { + "format": "int32", + "type": "integer" + }, "completionMode": { "type": "string" }, @@ -319,6 +349,10 @@ "manualSelector": { "type": "boolean" }, + "maxFailedIndexes": { + "format": "int32", + "type": "integer" + }, "parallelism": { "format": "int32", "type": "integer" @@ -375,8 +409,7 @@ } }, "required": [ - "action", - "onPodConditions" + "action" ], "type": "object" }, @@ -389,6 +422,9 @@ ], "type": "object" }, + "podReplacementPolicy": { + "type": "string" + }, "selector": { "properties": { "matchExpressions": { @@ -639,6 +675,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -742,6 +792,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": 
"atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -841,6 +905,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -944,6 +1022,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -1223,6 +1315,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -1307,6 +1411,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -1618,8 +1734,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + 
"name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -1629,6 +1782,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -2173,6 +2329,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -2257,6 +2425,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -2568,8 +2748,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -2579,6 +2796,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -3133,6 +3353,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -3217,6 +3449,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": 
"object" + }, "tcpSocket": { "properties": { "host": { @@ -3528,8 +3772,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -3539,6 +3820,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -3889,6 +4173,35 @@ }, "type": "array" }, + "resourceClaims": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "source": { + "properties": { + "resourceClaimName": { + "type": "string" + }, + "resourceClaimTemplateName": { + "type": "string" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "restartPolicy": { "type": "string" }, @@ -3898,6 +4211,24 @@ "schedulerName": { "type": "string" }, + "schedulingGates": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "securityContext": { "properties": { "fsGroup": { @@ -4293,6 +4624,26 @@ }, "authentication": { "properties": { + "oauth2": { + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "scopes": { + "items": { + "type": "string" + }, + "type": "array" + 
}, + "tokenUrl": { + "type": "string" + } + }, + "type": "object" + }, "sigv4": { "properties": { "profile": { @@ -4368,19 +4719,58 @@ }, "web": { "properties": { - "body": { - "type": "string" - }, - "headers": { - "items": { - "properties": { - "key": { - "type": "string" - }, - "value": { - "type": "string" - } - }, + "authentication": { + "properties": { + "oauth2": { + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "scopes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "tokenUrl": { + "type": "string" + } + }, + "type": "object" + }, + "sigv4": { + "properties": { + "profile": { + "type": "string" + }, + "region": { + "type": "string" + }, + "roleArn": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "body": { + "type": "string" + }, + "headers": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, "required": [ "key", "value" @@ -4690,19 +5080,45 @@ }, "datadog": { "properties": { + "aggregator": { + "enum": [ + "avg", + "min", + "max", + "sum", + "last", + "percentile", + "mean", + "l2norm", + "area" + ], + "type": "string" + }, "apiVersion": { + "default": "v1", + "enum": [ + "v1", + "v2" + ], + "type": "string" + }, + "formula": { "type": "string" }, "interval": { + "default": "5m", "type": "string" }, + "queries": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, "query": { "type": "string" } }, - "required": [ - "query" - ], "type": "object" }, "graphite": { @@ -4756,6 +5172,10 @@ "format": "int32", "type": "integer" }, + "backoffLimitPerIndex": { + "format": "int32", + "type": "integer" + }, "completionMode": { "type": "string" }, @@ -4766,6 +5186,10 @@ "manualSelector": { "type": "boolean" }, + "maxFailedIndexes": { + "format": "int32", + "type": "integer" + }, "parallelism": { "format": "int32", "type": "integer" @@ -4822,8 +5246,7 @@ } }, "required": [ - 
"action", - "onPodConditions" + "action" ], "type": "object" }, @@ -4836,6 +5259,9 @@ ], "type": "object" }, + "podReplacementPolicy": { + "type": "string" + }, "selector": { "properties": { "matchExpressions": { @@ -5086,6 +5512,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -5189,6 +5629,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -5288,6 +5742,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -5391,6 +5859,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -5670,6 +6152,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -5754,6 
+6248,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -6065,8 +6571,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -6076,6 +6619,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -6620,6 +7166,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -6704,6 +7262,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -7015,8 +7585,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + 
"name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -7026,6 +7633,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -7580,6 +8190,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -7664,6 +8286,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -7975,8 +8609,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -7986,6 +8657,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -8336,29 +9010,76 @@ }, "type": "array" }, - "restartPolicy": { - "type": "string" - }, - "runtimeClassName": { - "type": "string" - }, - "schedulerName": { - "type": "string" - }, - "securityContext": { - "properties": { - "fsGroup": { - "format": "int64", - "type": "integer" - }, - "fsGroupChangePolicy": { - "type": "string" - 
}, - "runAsGroup": { - "format": "int64", - "type": "integer" - }, - "runAsNonRoot": { + "resourceClaims": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "source": { + "properties": { + "resourceClaimName": { + "type": "string" + }, + "resourceClaimTemplateName": { + "type": "string" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "restartPolicy": { + "type": "string" + }, + "runtimeClassName": { + "type": "string" + }, + "schedulerName": { + "type": "string" + }, + "schedulingGates": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "securityContext": { + "properties": { + "fsGroup": { + "format": "int64", + "type": "integer" + }, + "fsGroupChangePolicy": { + "type": "string" + }, + "runAsGroup": { + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { "type": "boolean" }, "runAsUser": { @@ -8740,6 +9461,26 @@ }, "authentication": { "properties": { + "oauth2": { + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "scopes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "tokenUrl": { + "type": "string" + } + }, + "type": "object" + }, "sigv4": { "properties": { "profile": { @@ -8815,6 +9556,45 @@ }, "web": { "properties": { + "authentication": { + "properties": { + "oauth2": { + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "scopes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "tokenUrl": { + "type": "string" + } + }, + "type": "object" + }, + "sigv4": { + "properties": { + "profile": { + "type": "string" + }, + "region": { + "type": "string" + }, + 
"roleArn": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "body": { "type": "string" }, @@ -8878,11 +9658,24 @@ "type": "array", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" + }, + "templates": { + "items": { + "properties": { + "clusterScope": { + "type": "boolean" + }, + "templateName": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "templateName", + "x-kubernetes-patch-strategy": "merge" } }, - "required": [ - "metrics" - ], "type": "object" } }, @@ -9137,19 +9930,45 @@ }, "datadog": { "properties": { + "aggregator": { + "enum": [ + "avg", + "min", + "max", + "sum", + "last", + "percentile", + "mean", + "l2norm", + "area" + ], + "type": "string" + }, "apiVersion": { + "default": "v1", + "enum": [ + "v1", + "v2" + ], + "type": "string" + }, + "formula": { "type": "string" }, "interval": { + "default": "5m", "type": "string" }, + "queries": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, "query": { "type": "string" } }, - "required": [ - "query" - ], "type": "object" }, "graphite": { @@ -9203,6 +10022,10 @@ "format": "int32", "type": "integer" }, + "backoffLimitPerIndex": { + "format": "int32", + "type": "integer" + }, "completionMode": { "type": "string" }, @@ -9213,6 +10036,10 @@ "manualSelector": { "type": "boolean" }, + "maxFailedIndexes": { + "format": "int32", + "type": "integer" + }, "parallelism": { "format": "int32", "type": "integer" @@ -9269,8 +10096,7 @@ } }, "required": [ - "action", - "onPodConditions" + "action" ], "type": "object" }, @@ -9283,6 +10109,9 @@ ], "type": "object" }, + "podReplacementPolicy": { + "type": "string" + }, "selector": { "properties": { "matchExpressions": { @@ -9533,6 +10362,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + 
"mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -9636,6 +10479,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -9735,6 +10592,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -9838,6 +10709,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -10117,6 +11002,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -10201,6 +11098,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -10512,8 +11421,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + 
} + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -10523,6 +11469,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -11067,6 +12016,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -11151,6 +12112,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -11462,8 +12435,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -11473,6 +12483,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -12027,6 +13040,18 @@ ], "type": 
"object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -12111,6 +13136,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -12422,8 +13459,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -12433,6 +13507,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -12783,6 +13860,35 @@ }, "type": "array" }, + "resourceClaims": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "source": { + "properties": { + "resourceClaimName": { + "type": "string" + }, + "resourceClaimTemplateName": { + "type": "string" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "restartPolicy": { "type": "string" }, @@ -12792,6 +13898,24 @@ "schedulerName": { "type": "string" }, + "schedulingGates": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + 
"type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "securityContext": { "properties": { "fsGroup": { @@ -13187,6 +14311,26 @@ }, "authentication": { "properties": { + "oauth2": { + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "scopes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "tokenUrl": { + "type": "string" + } + }, + "type": "object" + }, "sigv4": { "properties": { "profile": { @@ -13262,6 +14406,45 @@ }, "web": { "properties": { + "authentication": { + "properties": { + "oauth2": { + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "scopes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "tokenUrl": { + "type": "string" + } + }, + "type": "object" + }, + "sigv4": { + "properties": { + "profile": { + "type": "string" + }, + "region": { + "type": "string" + }, + "roleArn": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "body": { "type": "string" }, @@ -13325,11 +14508,24 @@ "type": "array", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" + }, + "templates": { + "items": { + "properties": { + "clusterScope": { + "type": "boolean" + }, + "templateName": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "templateName", + "x-kubernetes-patch-strategy": "merge" } }, - "required": [ - "metrics" - ], "type": "object" } }, @@ -13725,6 +14921,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -13828,6 +15038,20 @@ "type": "object", 
"x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -13927,6 +15151,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -14030,6 +15268,20 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "matchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "namespaceSelector": { "properties": { "matchExpressions": { @@ -14309,6 +15561,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -14393,6 +15657,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -14704,8 +15980,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + 
"properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -14715,6 +16028,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -15259,6 +16575,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -15343,6 +16671,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -15654,8 +16994,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -15665,6 +17042,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -16219,6 +17599,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ 
-16303,6 +17695,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -16612,10 +18016,47 @@ "type": "integer" } }, - "type": "object" + "type": "object" + }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -16625,6 +18066,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -16975,6 +18419,35 @@ }, "type": "array" }, + "resourceClaims": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "source": { + "properties": { + "resourceClaimName": { + "type": "string" + }, + "resourceClaimTemplateName": { + "type": "string" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "restartPolicy": { "type": "string" }, @@ -16984,6 +18457,24 @@ "schedulerName": { "type": "string" }, + "schedulingGates": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "securityContext": { "properties": { "fsGroup": { @@ -17774,6 
+19265,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -17858,6 +19361,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -18169,8 +19684,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -18180,6 +19732,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -18689,6 +20244,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -18773,6 +20340,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -19084,8 +20663,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + 
"resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -19095,6 +20711,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -19640,6 +21259,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -19724,6 +21355,18 @@ ], "type": "object" }, + "sleep": { + "properties": { + "seconds": { + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, "tcpSocket": { "properties": { "host": { @@ -20035,8 +21678,45 @@ }, "type": "object" }, + "resizePolicy": { + "items": { + "properties": { + "resourceName": { + "type": "string" + }, + "restartPolicy": { + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "properties": { + "claims": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "x-kubernetes-preserve-unknown-fields": true }, @@ -20046,6 +21726,9 @@ }, "type": "object" }, + "restartPolicy": { + "type": "string" + }, "securityContext": { "properties": { "allowPrivilegeEscalation": { @@ -20338,6 +22021,64 @@ "x-kubernetes-patch-merge-key": "name", 
"x-kubernetes-patch-strategy": "merge" }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "resourceClaims": { + "items": { + "properties": { + "name": { + "type": "string" + }, + "source": { + "properties": { + "resourceClaimName": { + "type": "string" + }, + "resourceClaimTemplateName": { + "type": "string" + } + }, + "type": "object" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, + "schedulingGates": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, "topologySpreadConstraints": { "items": { "properties": { diff --git a/docs/features/specification.md b/docs/features/specification.md index 5f2a817c88..607367c0c8 100644 --- a/docs/features/specification.md +++ b/docs/features/specification.md @@ -33,6 +33,13 @@ spec: apiVersion: apps/v1 kind: Deployment name: rollout-ref-deployment + # Specifies if the workload (Deployment) is scaled down after migrating to Rollout. + # The possible options are: + # "never": the Deployment is not scaled down + # "onsuccess": the Deployment is scaled down after the Rollout becomes healthy + # "progressively": as the Rollout is scaled up the Deployment is scaled down + # If the Rollout fails the Deployment will be scaled back up. + scaleDown: never|onsuccess|progressively # Template describes the pods that will be created. Same as deployment. # If used, then do not use Rollout workloadRef property. 
@@ -379,6 +386,9 @@ spec: # will achieve traffic split via a weighted replica counts between # the canary and stable ReplicaSet. trafficRouting: + # Supports nginx and plugins only: This lets you control the denominator or total weight of traffic. + # The total weight of traffic. If unspecified, it defaults to 100 + maxTrafficWeight: 1000 # This is a list of routes that Argo Rollouts has the rights to manage it is currently only required for # setMirrorRoute and setHeaderRoute. The order of managedRoutes array also sets the precedence of the route # in the traffic router. Argo Rollouts will place these routes in the order specified above any routes already diff --git a/docs/features/traffic-management/alb.md b/docs/features/traffic-management/alb.md index 0ac0ff6173..5b4c424528 100644 --- a/docs/features/traffic-management/alb.md +++ b/docs/features/traffic-management/alb.md @@ -53,7 +53,7 @@ spec: ingress: ingress # If you want to controll multiple ingress resources you can use the ingresses field, if ingresses is specified # the ingress field will need to be omitted. - ingresses: + ingresses: - ingress-1 - ingress-2 # Reference to a Service that the Ingress must target in one of the rules (optional). 
@@ -66,7 +66,7 @@ spec: The referenced Ingress should be deployed with an ingress rule that matches the Rollout service: ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: ingress @@ -76,14 +76,17 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - # serviceName must match either: canary.trafficRouting.alb.rootService (if specified), - # or canary.stableService (if rootService is omitted) - serviceName: root-service - # servicePort must be the value: use-annotation - # This instructs AWS Load Balancer Controller to look to annotations on how to direct traffic - servicePort: use-annotation + service: + # serviceName must match either: canary.trafficRouting.alb.rootService (if specified), + # or canary.stableService (if rootService is omitted) + name: root-service + # servicePort must be the value: use-annotation + # This instructs AWS Load Balancer Controller to look to annotations on how to direct traffic + port: + name: use-annotation ``` During an update, the rollout controller injects the `alb.ingress.kubernetes.io/actions.` @@ -95,7 +98,7 @@ annotation that splits traffic between the canary-service and stable-service, wi of 10 and 90 respectively: ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: ingress @@ -123,10 +126,13 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - serviceName: root-service - servicePort: use-annotation + service: + name: root-service + port: + name: use-annotation ``` !!! 
note @@ -411,7 +417,7 @@ spec: By default, Argo Rollout will operate on Ingresses with the annotation: ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: @@ -420,7 +426,7 @@ metadata: Or with the `ingressClassName`: ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress spec: ingressClassName: alb diff --git a/docs/features/traffic-management/istio.md b/docs/features/traffic-management/istio.md index 415313841a..9d146d545c 100644 --- a/docs/features/traffic-management/istio.md +++ b/docs/features/traffic-management/istio.md @@ -473,6 +473,18 @@ help address this problem. The proposed solution is to introduce an annotation t indicates to Argo CD to respect and preserve the differences at a specified path, in order to allow other controllers (e.g. Argo Rollouts) controller manage them instead. +## Ping Pong + +!!! important + + Available since v1.7 + +Argo Rollouts also supports ping pong when using Istio this was added to support configuring both ALB and +Istio traffic routers at the same time. When using an ALB, ping-pong is generally a best practice especially with ALB readiness +gates enabled. However, when we change the service selectors when a rollout is aborted back to stable pod hash it causes a blip +of traffic outage because the ALB controller will set the pod readiness gates to false for a short while due to the label changes. +If we configure both ALB and Istio with ping-pong this selector change does not happen and hence we do not see any outages. 
+ ## Alternatives Considered ### Rollout ownership over the Virtual Service diff --git a/docs/features/traffic-management/plugins.md b/docs/features/traffic-management/plugins.md index 26e1d6c1bf..7f8d8e1b11 100644 --- a/docs/features/traffic-management/plugins.md +++ b/docs/features/traffic-management/plugins.md @@ -73,8 +73,11 @@ responsibility of the Argo Rollouts administrator to define the plugin installat * This is just a sample plugin that can be used as a starting point for creating your own plugin. It is not meant to be used in production. It is based on the built-in prometheus provider. +#### [Consul](https://github.com/argoproj-labs/rollouts-plugin-trafficrouter-consul) +* This is a plugin that allows argo-rollouts to work with Consul's service mesh for traffic shaping patterns. + #### [Contour](https://github.com/argoproj-labs/rollouts-plugin-trafficrouter-contour) -* This is a plugin for support Contour. +* This is a plugin that allows argo-rollouts to work with contour's resource: HTTPProxy. It enables traffic shaping patterns such as canary releases and more. #### [Gateway API](https://github.com/argoproj-labs/rollouts-plugin-trafficrouter-gatewayapi/) -* Provide support for Gateway API, which includes Kuma, Traefix, cilium, Contour, GloodMesh, HAProxy, and [many others](https://gateway-api.sigs.k8s.io/implementations/#implementation-status). \ No newline at end of file +* Provide support for Gateway API, which includes Kuma, Traefix, cilium, Contour, GloodMesh, HAProxy, and [many others](https://gateway-api.sigs.k8s.io/implementations/#implementation-status). \ No newline at end of file diff --git a/docs/features/traffic-management/smi.md b/docs/features/traffic-management/smi.md index ec635f1729..3f460be93c 100644 --- a/docs/features/traffic-management/smi.md +++ b/docs/features/traffic-management/smi.md @@ -3,12 +3,15 @@ !!! important Available since v0.9.0 +!!! 
warning + The Cloud Native Computing Foundation [has archived the SMI Spec](https://www.cncf.io/blog/2023/10/03/cncf-archives-the-service-mesh-interface-smi-project/). The recommended way forward is to look at the [Gateway API](https://gateway-api.sigs.k8s.io/), [Project Gamma](https://gateway-api.sigs.k8s.io/concepts/gamma/) and the [Argo Rollouts Gateway API Plugin](https://github.com/argoproj-labs/rollouts-plugin-trafficrouter-gatewayapi). + [Service Mesh Interface](https://smi-spec.io/) (SMI) is a standard interface for service meshes on Kubernetes leveraged by many Service Mesh implementations (like Linkerd). SMI offers this functionality through a set of CRDs, and the Argo Rollouts controller creates these resources to manipulate the traffic routing into the desired state. The Argo Rollout controller achieves traffic shaping by creating and manipulating the [TrafficSplit CR](https://github.com/servicemeshinterface/smi-spec/blob/master/traffic-split.md). A TrafficSplit describes the desired traffic routing for an application and relies on the underlying Service Meshes implement that desired state. Instead of worrying about the details of a specific service mesh, a user needs to specify a root Service that clients use to communicate and a list of backends consisting of a Service and weight. The Service Mesh implementing SMI uses this spec to route traffic to the backends Services based on the weights of the backends. For Rollout users, the Argo Rollout controller creates and manipulates the TrafficSplit using the following information: - Canary Service: Name of the service that sends traffic only to the canary pods -- Stable Service: Name of the service that sends traffic only to the stable po ds +- Stable Service: Name of the service that sends traffic only to the stable pods - Root Service: Name of the service that clients use to communicate. If a request comes to this root service not through a proxy, the standard Kubernetes service routing will be used. 
Below is an example of a Rollout with all the required fields configured: diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts.md index 4e2bcc21f4..473f555d46 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts.md @@ -41,6 +41,7 @@ kubectl argo rollouts retry guestbook --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server -h, --help help for kubectl-argo-rollouts --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_abort.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_abort.md index f022a09b65..044ad9bc84 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_abort.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_abort.md @@ -38,6 +38,7 @@ kubectl argo rollouts abort guestbook --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_completion.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_completion.md index a22b057163..68cc073f38 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_completion.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_completion.md @@ -66,6 +66,7 @@ kubectl argo rollouts completion [bash|zsh|fish|powershell] --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create.md index ce4ad148ce..789bb71785 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create.md @@ -38,6 +38,7 @@ kubectl argo rollouts create -f my-experiment.yaml -w --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create_analysisrun.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create_analysisrun.md index 3f43c1f2a0..2714a0566f 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create_analysisrun.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_create_analysisrun.md @@ -51,6 +51,7 @@ kubectl argo rollouts create analysisrun --global --from my-analysis-cluster-tem --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_dashboard.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_dashboard.md index d51cdb52c7..eb6ee9e6fc 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_dashboard.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_dashboard.md @@ -30,6 +30,7 @@ kubectl argo rollouts dashboard [flags] --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get.md index 552551a9f0..7059ab9d0c 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get.md @@ -41,6 +41,7 @@ kubectl argo rollouts get experiment my-experiment --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_experiment.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_experiment.md index de3c438670..56fbb3d996 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_experiment.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_experiment.md @@ -52,6 +52,7 @@ kubectl argo rollouts get experiment my-experiment -w --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_rollout.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_rollout.md index 2581c7d1c8..77b04d2217 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_rollout.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_get_rollout.md @@ -53,6 +53,7 @@ kubectl argo rollouts get rollout guestbook -w --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_lint.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_lint.md index db560b6399..b0997bc2f2 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_lint.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_lint.md @@ -36,6 +36,7 @@ kubectl argo rollouts lint -f my-rollout.yaml --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list.md index cdc673efda..bbaa94c809 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list.md @@ -39,6 +39,7 @@ kubectl argo rollouts list experiments --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_experiments.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_experiments.md index f15c3aa86b..4add56c247 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_experiments.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_experiments.md @@ -42,6 +42,7 @@ kubectl argo rollouts list experiments --watch --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_rollouts.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_rollouts.md index 503091170e..e861ca20c8 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_rollouts.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_list_rollouts.md @@ -45,6 +45,7 @@ kubectl argo rollouts list rollouts --watch --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications.md index 72a7b4635b..f32667d9f6 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications.md @@ -22,6 +22,7 @@ kubectl argo rollouts notifications [flags] --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server -h, --help help for notifications --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to a kube config. 
Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template.md index 2fc9e2a9ef..0617a720e8 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template.md @@ -29,6 +29,7 @@ kubectl argo rollouts notifications template [flags] --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to a kube config. Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_get.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_get.md index 7dec5ef4ce..4748720d92 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_get.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_get.md @@ -41,6 +41,7 @@ kubectl argo rollouts notifications template get app-sync-succeeded -o=yaml --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to a kube config. Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_notify.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_notify.md index ecd2e1b663..c9a2a100e9 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_notify.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_template_notify.md @@ -42,6 +42,7 @@ kubectl argo rollouts notifications template notify app-sync-succeeded guestbook --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to a kube config. 
Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger.md index 6ba01befea..d18d2750d9 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger.md @@ -29,6 +29,7 @@ kubectl argo rollouts notifications trigger [flags] --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to a kube config. Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_get.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_get.md index 11f1586772..3efb765450 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_get.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_get.md @@ -41,6 +41,7 @@ kubectl argo rollouts notifications trigger get on-sync-failed -o=yaml --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to a kube config. Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_run.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_run.md index 9782976189..64202b0f7d 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_run.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_notifications_trigger_run.md @@ -41,6 +41,7 @@ kubectl argo rollouts notifications trigger run on-sync-status-unknown ./sample- --cluster string The name of the kubeconfig cluster to use --config-map string argo-rollouts-notification-configmap.yaml file path --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to a kube config. 
Only required if out-of-cluster diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_pause.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_pause.md index c90c2efbaa..a40f67ea40 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_pause.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_pause.md @@ -35,6 +35,7 @@ kubectl argo rollouts pause guestbook --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_promote.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_promote.md index 5e22f988b9..9d140bf6e4 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_promote.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_promote.md @@ -42,6 +42,7 @@ kubectl argo rollouts promote guestbook --full --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_restart.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_restart.md index c7fa91ea44..94f94125b5 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_restart.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_restart.md @@ -39,6 +39,7 @@ kubectl argo rollouts restart ROLLOUT_NAME --in 10s --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry.md index 8d4fcf14e0..fc150355c3 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry.md @@ -38,6 +38,7 @@ kubectl argo rollouts retry experiment my-experiment --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_experiment.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_experiment.md index 2ab8acf4d7..23d7d4b9e1 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_experiment.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_experiment.md @@ -35,6 +35,7 @@ kubectl argo rollouts retry experiment my-experiment --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_rollout.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_rollout.md index 4b1b664624..049ecc2088 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_rollout.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_retry_rollout.md @@ -35,6 +35,7 @@ kubectl argo rollouts retry rollout guestbook --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set.md index 1944152089..b77a16231a 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set.md @@ -35,6 +35,7 @@ kubectl argo rollouts set image my-rollout demo=argoproj/rollouts-demo:yellow --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set_image.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set_image.md index 10ec065625..5f89a6a506 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set_image.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_set_image.md @@ -35,6 +35,7 @@ kubectl argo rollouts set image my-rollout www=image:v2 --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_status.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_status.md index c57023eb56..e1221d4056 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_status.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_status.md @@ -42,6 +42,7 @@ kubectl argo rollouts status --timeout 60s guestbook --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate.md index 0041f90d4b..4491cca5fe 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate.md @@ -38,6 +38,7 @@ kubectl argo rollouts terminate experiment my-experiment --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_analysisrun.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_analysisrun.md index 8dece8a431..268b7a0150 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_analysisrun.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_analysisrun.md @@ -35,6 +35,7 @@ kubectl argo rollouts terminate analysis guestbook-877894d5b-4-success-rate.1 --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_experiment.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_experiment.md index 85d793ba68..d99866bb35 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_experiment.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_terminate_experiment.md @@ -35,6 +35,7 @@ kubectl argo rollouts terminate experiment my-experiment --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_undo.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_undo.md index 1cc0c826c8..0ddf539dc9 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_undo.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_undo.md @@ -39,6 +39,7 @@ kubectl argo rollouts undo guestbook --to-revision=3 --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_version.md b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_version.md index 7294c5dd12..5f8d91d5da 100644 --- a/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_version.md +++ b/docs/generated/kubectl-argo-rollouts/kubectl-argo-rollouts_version.md @@ -39,6 +39,7 @@ kubectl argo rollouts version --short --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use + --disable-compression If true, opt-out of response compression for all requests to the server --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure -v, --kloglevel int Log level for kubernetes client library --kubeconfig string Path to the kubeconfig file to use for CLI requests. diff --git a/docs/generated/notification-services/alertmanager.md b/docs/generated/notification-services/alertmanager.md index e0f9d7e4e7..556bd749d2 100755 --- a/docs/generated/notification-services/alertmanager.md +++ b/docs/generated/notification-services/alertmanager.md @@ -43,7 +43,7 @@ You should turn off "send_resolved" or you will receive unnecessary recovery not apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.alertmanager: | targets: @@ -58,7 +58,7 @@ If your alertmanager has changed the default api, you can customize "apiPath". apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.alertmanager: | targets: @@ -70,7 +70,7 @@ data: ### Send high availability alertmanager with auth -Store auth token in `argocd-notifications-secret` Secret and use configure in `argocd-notifications-cm` ConfigMap. +Store auth token in `argo-rollouts-notification-secret` Secret and use configure in `argo-rollouts-notification-configmap` ConfigMap. ```yaml apiVersion: v1 @@ -89,7 +89,7 @@ stringData: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.alertmanager: | targets: @@ -110,7 +110,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.alertmanager: | targets: diff --git a/docs/generated/notification-services/awssqs.md b/docs/generated/notification-services/awssqs.md index 6bbc47cbbc..dbfbd708ff 100755 --- a/docs/generated/notification-services/awssqs.md +++ b/docs/generated/notification-services/awssqs.md @@ -1,13 +1,13 @@ -# AWS SQS +# AWS SQS ## Parameters -This notification service is capable of sending simple messages to AWS SQS queue. 
+This notification service is capable of sending simple messages to AWS SQS queue. -* `queue` - name of the queue you are intending to send messages to. Can be overwriten with target destination annotation. +* `queue` - name of the queue you are intending to send messages to. Can be overridden with target destination annotation. * `region` - region of the sqs queue can be provided via env variable AWS_DEFAULT_REGION * `key` - optional, aws access key must be either referenced from a secret via variable or via env variable AWS_ACCESS_KEY_ID -* `secret` - optional, aws access secret must be either referenced from a secret via variableor via env variable AWS_SECRET_ACCESS_KEY +* `secret` - optional, aws access secret must be either referenced from a secret via variable or via env variable AWS_SECRET_ACCESS_KEY * `account` optional, external accountId of the queue * `endpointUrl` optional, useful for development with localstack @@ -30,7 +30,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.awssqs: | region: "us-east-2" @@ -63,7 +63,7 @@ stringData: ### Minimal configuration using AWS Env variables -Ensure following list of enviromental variable is injected via OIDC, or other method. And assuming SQS is local to the account. +Ensure the following list of environment variables are injected via OIDC, or another method. And assuming SQS is local to the account. You may skip usage of secret for sensitive data and omit other parameters. (Setting parameters via ConfigMap takes precedent.) 
Variables: @@ -89,7 +89,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.awssqs: | queue: "myqueue" @@ -104,3 +104,16 @@ data: - oncePer: obj.metadata.annotations["generation"] ``` + +## FIFO SQS Queues + +FIFO queues require a [MessageGroupId](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html#SQS-SendMessage-request-MessageGroupId) to be sent along with every message, every message with a matching MessageGroupId will be processed one by one in order. + +To send to a FIFO SQS Queue you must include a `messageGroupId` in the template such as in the example below: + +```yaml +template.deployment-ready: | + message: | + Deployment {{.obj.metadata.name}} is ready! + messageGroupId: {{.obj.metadata.name}}-deployment +``` diff --git a/docs/generated/notification-services/email.md b/docs/generated/notification-services/email.md index b81ab6cde8..f3eca407a2 100755 --- a/docs/generated/notification-services/email.md +++ b/docs/generated/notification-services/email.md @@ -20,7 +20,7 @@ The following snippet contains sample Gmail service configuration: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.email.gmail: | username: $email-username @@ -36,7 +36,7 @@ Without authentication: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.email.example: | host: smtp.example.com @@ -52,7 +52,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: template.app-sync-succeeded: | email: diff --git a/docs/generated/notification-services/github.md b/docs/generated/notification-services/github.md index 913efef6ec..dd55c88ddd 100755 --- a/docs/generated/notification-services/github.md +++ b/docs/generated/notification-services/github.md @@ -17,14 +17,14 @@ The GitHub notification service changes commit status 
using [GitHub Apps](https: 3. Generate a private key, and download it automatically ![3](https://user-images.githubusercontent.com/18019529/108397926-d4a36300-725b-11eb-83fe-74795c8c3e03.png) 4. Install app to account -5. Store privateKey in `argocd-notifications-secret` Secret and configure GitHub integration -in `argocd-notifications-cm` ConfigMap +5. Store privateKey in `argo-rollouts-notification-secret` Secret and configure GitHub integration +in `argo-rollouts-notification-configmap` ConfigMap ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.github: | appID: @@ -76,6 +76,7 @@ template.app-deployed: | logURL: "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true" requiredContexts: [] autoMerge: true + transientEnvironment: false pullRequestComment: content: | Application {{.app.metadata.name}} is now running new version of deployments manifests. @@ -87,5 +88,5 @@ template.app-deployed: | - If `github.repoURLPath` and `github.revisionPath` are same as above, they can be omitted. - Automerge is optional and `true` by default for github deployments to ensure the requested ref is up to date with the default branch. Setting this option to `false` is required if you would like to deploy older refs in your default branch. - For more information see the [Github Deployment API Docs](https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#create-a-deployment). + For more information see the [GitHub Deployment API Docs](https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#create-a-deployment). - If `github.pullRequestComment.content` is set to 65536 characters or more, it will be truncated. 
diff --git a/docs/generated/notification-services/googlechat.md b/docs/generated/notification-services/googlechat.md index fa3bdce8da..c8cdf036e3 100755 --- a/docs/generated/notification-services/googlechat.md +++ b/docs/generated/notification-services/googlechat.md @@ -13,13 +13,13 @@ The Google Chat notification service send message notifications to a google chat 3. Under **Incoming Webhooks**, click **Add Webhook** 4. Give a name to the webhook, optionally add an image and click **Save** 5. Copy the URL next to your webhook -6. Store the URL in `argocd-notification-secret` and declare it in `argocd-notifications-cm` +6. Store the URL in `argo-rollouts-notification-secret` and declare it in `argo-rollouts-notification-configmap` ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.googlechat: | webhooks: @@ -89,7 +89,7 @@ It is possible send both simple text and card messages in a chat thread by speci ```yaml template.app-sync-succeeded: | - message: The app {{ .app.metadata.name }} has succesfully synced! + message: The app {{ .app.metadata.name }} has successfully synced! googlechat: threadKey: {{ .app.metadata.name }} ``` diff --git a/docs/generated/notification-services/grafana.md b/docs/generated/notification-services/grafana.md index a36672d0fa..7415ca5636 100755 --- a/docs/generated/notification-services/grafana.md +++ b/docs/generated/notification-services/grafana.md @@ -15,13 +15,13 @@ Available parameters : 3. Click "Add API Key" 4. Fill the Key with name `ArgoCD Notification`, role `Editor` and Time to Live `10y` (for example) 5. Click on Add button -6. Store apiKey in `argocd-notifications-secret` Secret and Copy your API Key and define it in `argocd-notifications-cm` ConfigMap +6.
Store apiKey in `argo-rollouts-notification-secret` Secret and Copy your API Key and define it in `argo-rollouts-notification-configmap` ConfigMap ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.grafana: | apiUrl: https://grafana.example.com/api diff --git a/docs/generated/notification-services/mattermost.md b/docs/generated/notification-services/mattermost.md index 98e0d0fd7b..69a17a28f6 100755 --- a/docs/generated/notification-services/mattermost.md +++ b/docs/generated/notification-services/mattermost.md @@ -12,14 +12,14 @@ ![1](https://user-images.githubusercontent.com/18019529/111499520-62ed0500-8786-11eb-88b0-d0aade61fed4.png) 2. Invite team ![2](https://user-images.githubusercontent.com/18019529/111500197-1229dc00-8787-11eb-98e5-587ee36c94a9.png) -3. Store token in `argocd-notifications-secret` Secret and configure Mattermost integration -in `argocd-notifications-cm` ConfigMap +3. Store token in `argo-rollouts-notification-secret` Secret and configure Mattermost integration +in `argo-rollouts-notification-configmap` ConfigMap ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.mattermost: | apiURL: diff --git a/docs/generated/notification-services/newrelic.md b/docs/generated/notification-services/newrelic.md index d98288a846..49739c16ea 100755 --- a/docs/generated/notification-services/newrelic.md +++ b/docs/generated/notification-services/newrelic.md @@ -8,13 +8,13 @@ ## Configuration 1. Create a NewRelic [Api Key](https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#user-api-key) -2. Store apiKey in `argocd-notifications-secret` Secret and configure NewRelic integration in `argocd-notifications-cm` ConfigMap +2. 
Store apiKey in `argo-rollouts-notification-secret` Secret and configure NewRelic integration in `argo-rollouts-notification-configmap` ConfigMap ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.newrelic: | apiURL: diff --git a/docs/generated/notification-services/opsgenie.md b/docs/generated/notification-services/opsgenie.md index 665d0081e7..ec03c40f41 100755 --- a/docs/generated/notification-services/opsgenie.md +++ b/docs/generated/notification-services/opsgenie.md @@ -7,22 +7,58 @@ To be able to send notifications with argocd-notifications you have to create an 3. Click "Teams" in the Menu on the left 4. Select the team that you want to notify 5. In the teams configuration menu select "Integrations" -6. click "Add Integration" in the top right corner +6. Click "Add Integration" in the top right corner 7. Select "API" integration 8. Give your integration a name, copy the "API key" and safe it somewhere for later -9. Make sure the checkboxes for "Create and Update Access" and "enable" are selected, disable the other checkboxes to remove unnecessary permissions -10. Click "Safe Integration" at the bottom -11. Check your browser for the correct server apiURL. If it is "app.opsgenie.com" then use the us/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (european api). -12. You are finished with configuring opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the opsgenie integration in the `argocd-notifications-secret` secret. +9. Click "Edit" in the integration settings +10. Make sure the checkbox for "Create and Update Access" is selected, disable the other checkboxes to remove unnecessary permissions +11. Click "Save" at the bottom +12. Click "Turn on integration" in the top right corner +13. Check your browser for the correct server apiURL. 
If it is "app.opsgenie.com" then use the US/international api url `api.opsgenie.com` in the next step, otherwise use `api.eu.opsgenie.com` (European API). +14. You are finished with configuring Opsgenie. Now you need to configure argocd-notifications. Use the apiUrl, the team name and the apiKey to configure the Opsgenie integration in the `argo-rollouts-notification-secret` secret. +15. You can find the example `argo-rollouts-notification-configmap` configuration below. + +| **Option** | **Required** | **Type** | **Description** | **Example** | +| ------------- | ------------ | -------- | -------------------------------------------------------------------------------------------------------- | -------------------------------- | +| `description` | True | `string` | Description field of the alert that is generally used to provide detailed information about the alert. | `Hello from Argo CD!` | +| `priority` | False | `string` | Priority level of the alert. Possible values are P1, P2, P3, P4 and P5. Default value is P3. | `P1` | +| `alias` | False | `string` | Client-defined identifier of the alert, that is also the key element of Alert De-Duplication. | `Life is too short for no alias` | +| `note` | False | `string` | Additional note that will be added while creating the alert. | `Error from Argo CD!` | ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.opsgenie: | apiUrl: apiKeys: : + template.opsgenie: | + message: | + [Argo CD] Application {{.app.metadata.name}} has a problem. + opsgenie: + description: | + Application: {{.app.metadata.name}} + Health Status: {{.app.status.health.status}} + Operation State Phase: {{.app.status.operationState.phase}} + Sync Status: {{.app.status.sync.status}} + priority: P1 + alias: {{.app.metadata.name}} + note: Error from Argo CD! + trigger.on-a-problem: | + - description: Application has a problem.
+ send: + - opsgenie + when: app.status.health.status == 'Degraded' or app.status.operationState.phase in ['Error', 'Failed'] or app.status.sync.status == 'Unknown' +``` + +16. Add annotation in application yaml file to enable notifications for specific Argo CD app. +```yaml + apiVersion: argoproj.io/v1alpha1 + kind: Application + metadata: + annotations: + notifications.argoproj.io/subscribe.on-a-problem.opsgenie: ``` \ No newline at end of file diff --git a/docs/generated/notification-services/overview.md b/docs/generated/notification-services/overview.md index 265e575755..a042d942b1 100755 --- a/docs/generated/notification-services/overview.md +++ b/docs/generated/notification-services/overview.md @@ -1,5 +1,5 @@ -The notification services represent integration with services such as slack, email or custom webhook. Services are configured in `argocd-notifications-cm` ConfigMap -using `service..()` keys and might reference sensitive data from `argocd-notifications-secret` Secret. Following example demonstrates slack +The notification services represent integration with services such as slack, email or custom webhook. Services are configured in `argo-rollouts-notification-configmap` ConfigMap +using `service..()` keys and might reference sensitive data from `argo-rollouts-notification-secret` Secret. 
Following example demonstrates slack service configuration: ```yaml diff --git a/docs/generated/notification-services/pagerduty.md b/docs/generated/notification-services/pagerduty.md index 0e1ab96533..8ac83bacce 100755 --- a/docs/generated/notification-services/pagerduty.md +++ b/docs/generated/notification-services/pagerduty.md @@ -1,17 +1,17 @@ -# Pagerduty +# PagerDuty ## Parameters -The Pagerduty notification service is used to create pagerduty incidents and requires specifying the following settings: +The PagerDuty notification service is used to create PagerDuty incidents and requires specifying the following settings: -* `pagerdutyToken` - the pagerduty auth token +* `pagerdutyToken` - the PagerDuty auth token * `from` - email address of a valid user associated with the account making the request. * `serviceID` - The ID of the resource. ## Example -The following snippet contains sample Pagerduty service configuration: +The following snippet contains sample PagerDuty service configuration: ```yaml apiVersion: v1 @@ -26,7 +26,7 @@ stringData: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.pagerduty: | token: $pagerdutyToken @@ -35,13 +35,13 @@ data: ## Template -[Notification templates](../templates.md) support specifying subject for pagerduty notifications: +[Notification templates](../templates.md) support specifying subject for PagerDuty notifications: ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: template.rollout-aborted: | message: Rollout {{.rollout.metadata.name}} is aborted. 
@@ -62,5 +62,5 @@ apiVersion: argoproj.io/v1alpha1 kind: Rollout metadata: annotations: - notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "" + notifications.argoproj.io/subscribe.on-rollout-aborted.pagerduty: "" ``` diff --git a/docs/generated/notification-services/pagerduty_v2.md b/docs/generated/notification-services/pagerduty_v2.md index 21e8d942e4..36b91783b3 100755 --- a/docs/generated/notification-services/pagerduty_v2.md +++ b/docs/generated/notification-services/pagerduty_v2.md @@ -28,7 +28,7 @@ stringData: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.pagerdutyv2: | serviceKeys: @@ -43,7 +43,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: template.rollout-aborted: | message: Rollout {{.rollout.metadata.name}} is aborted. @@ -74,5 +74,5 @@ apiVersion: argoproj.io/v1alpha1 kind: Rollout metadata: annotations: - notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "" + notifications.argoproj.io/subscribe.on-rollout-aborted.pagerdutyv2: "" ``` diff --git a/docs/generated/notification-services/pushover.md b/docs/generated/notification-services/pushover.md index 37cb20b277..bdcf1b48b4 100755 --- a/docs/generated/notification-services/pushover.md +++ b/docs/generated/notification-services/pushover.md @@ -1,13 +1,13 @@ # Pushover 1. Create an app at [pushover.net](https://pushover.net/apps/build). -2. Store the API key in `` Secret and define the secret name in `` ConfigMap: +2. 
Store the API key in `` Secret and define the secret name in `argo-rollouts-notification-configmap` ConfigMap: ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.pushover: | token: $pushover-token diff --git a/docs/generated/notification-services/rocketchat.md b/docs/generated/notification-services/rocketchat.md index f115705013..a283d84fa6 100755 --- a/docs/generated/notification-services/rocketchat.md +++ b/docs/generated/notification-services/rocketchat.md @@ -43,7 +43,7 @@ stringData: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.rocketchat: | email: $rocketchat-email diff --git a/docs/generated/notification-services/slack.md b/docs/generated/notification-services/slack.md index 15937597c1..625199525d 100755 --- a/docs/generated/notification-services/slack.md +++ b/docs/generated/notification-services/slack.md @@ -6,11 +6,16 @@ If you want to send message using incoming webhook, you can use [webhook](./webh The Slack notification service configuration includes following settings: -* `token` - the app token -* `apiURL` - optional, the server url, e.g. https://example.com/api -* `username` - optional, the app username -* `icon` - optional, the app icon, e.g. :robot_face: or https://example.com/image.png -* `insecureSkipVerify` - optional bool, true or false +| **Option** | **Required** | **Type** | **Description** | **Example** | +| -------------------- | ------------ | -------------- | --------------- | ----------- | +| `apiURL` | False | `string` | The server URL. | `https://example.com/api` | +| `channels` | False | `list[string]` | | `["my-channel-1", "my-channel-2"]` | +| `icon` | False | `string` | The app icon. 
| `:robot_face:` or `https://example.com/image.png` | +| `insecureSkipVerify` | False | `bool` | | `true` | +| `signingSecret` | False | `string` | | `8f742231b10e8888abcd99yyyzzz85a5` | +| `token` | **True** | `string` | The app's OAuth access token. | `xoxb-1234567890-1234567890123-5n38u5ed63fgzqlvuyxvxcx6` | +| `username` | False | `string` | The app username. | `argocd` | +| `disableUnfurl` | False | `bool` | Disable slack unfurling links in messages | `true` | ## Configuration @@ -27,7 +32,7 @@ The Slack notification service configuration includes following settings: 1. Create a public or private channel, for this example `my_channel` 1. Invite your slack bot to this channel **otherwise slack bot won't be able to deliver notifications to this channel** -1. Store Oauth access token in `argocd-notifications-secret` secret +1. Store Oauth access token in `argo-rollouts-notification-secret` secret ```yaml apiVersion: v1 @@ -38,13 +43,13 @@ The Slack notification service configuration includes following settings: slack-token: ``` -1. Define service type slack in data section of `argocd-notifications-cm` configmap: +1. Define service type slack in data section of `argo-rollouts-notification-configmap` configmap: ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.slack: | token: $slack-token diff --git a/docs/generated/notification-services/teams.md b/docs/generated/notification-services/teams.md index b5b9a228c4..d04a644c60 100755 --- a/docs/generated/notification-services/teams.md +++ b/docs/generated/notification-services/teams.md @@ -12,13 +12,13 @@ The Teams notification service send message notifications using Teams bot and re 2. Find `Incoming Webhook` microsoft app and click on it 3. Press `Add to a team` -> select team and channel -> press `Set up a connector` 4. Enter webhook name and upload image (optional) -5. 
Press `Create` then copy webhook url and store it in `argocd-notifications-secret` and define it in `argocd-notifications-cm` +5. Press `Create` then copy webhook url and store it in `argo-rollouts-notification-secret` and define it in `argo-rollouts-notification-configmap` ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.teams: | recipientUrls: @@ -113,7 +113,7 @@ template.app-sync-succeeded: | ### summary field -You can set a summary of the message that will be shown on Notifcation & Activity Feed +You can set a summary of the message that will be shown on Notification & Activity Feed ![](https://user-images.githubusercontent.com/6957724/116587921-84c4d480-a94d-11eb-9da4-f365151a12e7.jpg) diff --git a/docs/generated/notification-services/telegram.md b/docs/generated/notification-services/telegram.md index 953c2a9fca..bdec349a57 100755 --- a/docs/generated/notification-services/telegram.md +++ b/docs/generated/notification-services/telegram.md @@ -2,13 +2,13 @@ 1. Get an API token using [@Botfather](https://t.me/Botfather). 2. 
Store token in `` Secret and configure telegram integration -in `` ConfigMap: +in `argo-rollouts-notification-configmap` ConfigMap: ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.telegram: | token: $telegram-token @@ -33,3 +33,12 @@ metadata: annotations: notifications.argoproj.io/subscribe.on-sync-succeeded.telegram: -1000000000000 ``` + +If your private chat contains threads, you can optionally specify a thread id by separating it with a `|`: +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + annotations: + notifications.argoproj.io/subscribe.on-sync-succeeded.telegram: -1000000000000|2 +``` diff --git a/docs/generated/notification-services/webex.md b/docs/generated/notification-services/webex.md index 440ed1ddc7..01706d1474 100755 --- a/docs/generated/notification-services/webex.md +++ b/docs/generated/notification-services/webex.md @@ -9,7 +9,7 @@ The Webex Teams notification service configuration includes following settings: ## Configuration 1. Create a Webex [Bot](https://developer.webex.com/docs/bots) -1. Copy the bot access [token](https://developer.webex.com/my-apps) and store it in the `argocd-notifications-secret` Secret and configure Webex Teams integration in `argocd-notifications-cm` ConfigMap +1.
Copy the bot access [token](https://developer.webex.com/my-apps) and store it in the `argo-rollouts-notification-secret` Secret and configure Webex Teams integration in `argo-rollouts-notification-configmap` ConfigMap ``` yaml apiVersion: v1 @@ -24,7 +24,7 @@ The Webex Teams notification service configuration includes following settings: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webex: | token: $webex-token diff --git a/docs/generated/notification-services/webhook.md b/docs/generated/notification-services/webhook.md index bd45b1f69e..f1c88c2d13 100755 --- a/docs/generated/notification-services/webhook.md +++ b/docs/generated/notification-services/webhook.md @@ -1,7 +1,7 @@ # Webhook The webhook notification service allows sending a generic HTTP request using the templatized request body and URL. -Using Webhook you might trigger a Jenkins job, update Github commit status. +Using Webhook you might trigger a Jenkins job, update GitHub commit status. ## Parameters @@ -9,20 +9,29 @@ The Webhook notification service configuration includes following settings: - `url` - the url to send the webhook to - `headers` - optional, the headers to pass along with the webhook -- `basicAuth` - optional, the basic authentication to pass along with the webook +- `basicAuth` - optional, the basic authentication to pass along with the webhook - `insecureSkipVerify` - optional bool, true or false +- `retryWaitMin` - Optional, the minimum wait time between retries. Default value: 1s. +- `retryWaitMax` - Optional, the maximum wait time between retries. Default value: 5s. +- `retryMax` - Optional, the maximum number of retries. Default value: 3. + +## Retry Behavior + +The webhook service will automatically retry the request if it fails due to network errors or if the server returns a 5xx status code. 
The number of retries and the wait time between retries can be configured using the `retryMax`, `retryWaitMin`, and `retryWaitMax` parameters. + +The wait time between retries is between `retryWaitMin` and `retryWaitMax`. If all retries fail, the `Send` method will return an error. ## Configuration Use the following steps to configure webhook: -1 Register webhook in `argocd-notifications-cm` ConfigMap: +1 Register webhook in `argo-rollouts-notification-configmap` ConfigMap: ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webhook.: | url: https:/// @@ -41,7 +50,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: template.github-commit-status: | webhook: @@ -67,13 +76,13 @@ metadata: ## Examples -### Set Github commit status +### Set GitHub commit status ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webhook.github: | url: https://api.github.com @@ -88,7 +97,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webhook.github: | url: https://api.github.com @@ -119,7 +128,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webhook.jenkins: | url: http:///job//build?token= @@ -136,7 +145,7 @@ type: Opaque apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webhook.form: | url: https://form.example.com @@ -157,7 +166,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: + name: argo-rollouts-notification-configmap data: service.webhook.slack_webhook: | url: https://hooks.slack.com/services/xxxxx diff --git a/docs/getting-started/alb/index.md b/docs/getting-started/alb/index.md index a158b64748..73b3856c9e 100644 --- a/docs/getting-started/alb/index.md +++ 
b/docs/getting-started/alb/index.md @@ -1,7 +1,7 @@ # Getting Started - AWS Load Balancer Controller This guide covers how Argo Rollouts integrates with the -[AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/) +[AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/) for traffic shaping. This guide builds upon the concepts of the [basic getting started guide](../../getting-started.md). ## Requirements @@ -48,7 +48,7 @@ This should be `canary.trafficRouting.alb.rootService` (if specified), otherwise use `canary.stableService`. ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-ingress @@ -58,20 +58,23 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - # serviceName must match either: canary.trafficRouting.alb.rootService (if specified), - # or canary.stableService (if rootService is omitted) - serviceName: rollouts-demo-root - # servicePort must be the value: use-annotation - # This instructs AWS Load Balancer Controller to look to annotations on how to direct traffic - servicePort: use-annotation + service: + # serviceName must match either: canary.trafficRouting.alb.rootService (if specified), + # or canary.stableService (if rootService is omitted) + name: rollouts-demo-root + # servicePort must be the value: use-annotation + # This instructs AWS Load Balancer Controller to look to annotations on how to direct traffic + port: + name: use-annotation ``` During an update, the Ingress will be injected with a [custom action annotation](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/ingress/annotations/#actions), which directs the ALB to splits traffic between the stable and canary Services referenced by the Rollout. 
-In this example, those Services are named: `rollouts-demo-stable` and `rollouts-demo-canary` +In this example, those Services are named: `rollouts-demo-stable` and `rollouts-demo-canary` respectively. Run the following commands to deploy: @@ -123,15 +126,15 @@ kubectl argo rollouts get rollout rollouts-demo ![Rollout ALB Paused](paused-rollout-alb.png) At this point, both the canary and stable version of the Rollout are running, with 5% of the -traffic directed to the canary. To understand how this works, inspect the listener rules for -the ALB. When looking at the listener rules, we see that the forward action weights +traffic directed to the canary. To understand how this works, inspect the listener rules for +the ALB. When looking at the listener rules, we see that the forward action weights have been modified by the controller to reflect the current weight of the canary. ![ALB Listener_Rules](alb-listener-rules.png) -The controller has added `rollouts-pod-template-hash` selector to the Services and -attached the same label to the Pods. Therefore, you can split the traffic by simply +The controller has added `rollouts-pod-template-hash` selector to the Services and +attached the same label to the Pods. Therefore, you can split the traffic by simply forwarding the requests to the Services according to the weights. - + As the Rollout progresses through steps, the forward action weights will be adjusted to match the current setWeight of the steps. 
diff --git a/docs/getting-started/alb/ingress.yaml b/docs/getting-started/alb/ingress.yaml index 1f1b66ca7d..a90c59c417 100644 --- a/docs/getting-started/alb/ingress.yaml +++ b/docs/getting-started/alb/ingress.yaml @@ -1,4 +1,4 @@ -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-ingress @@ -8,7 +8,10 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - serviceName: rollouts-demo-root - servicePort: use-annotation + service: + name: rollouts-demo-root + port: + name: use-annotation diff --git a/docs/getting-started/mixed/index.md b/docs/getting-started/mixed/index.md index 4a6d12f02b..5c59f37098 100644 --- a/docs/getting-started/mixed/index.md +++ b/docs/getting-started/mixed/index.md @@ -4,11 +4,11 @@ Available since v1.2 This guide covers how Argo Rollouts integrates with multiple TrafficRoutings, using -[Linkerd](https://linkerd.io) and +[Linkerd](https://linkerd.io) and [NGINX Ingress Controller](https://github.com/kubernetes/ingress-nginx) for traffic shaping, but you should be able to produce any other combination between the existing trafficRouting options. -This guide builds upon the concepts of the [basic getting started guide](../../getting-started.md), +This guide builds upon the concepts of the [basic getting started guide](../../getting-started.md), [NGINX Guide](getting-started/nginx/index.md), and [SMI Guide](getting-started/smi/index.md). 
## Requirements @@ -89,7 +89,7 @@ rule which has a backend targeting the Service referenced under `canary.stableSe In our example, that stable Service is named: `rollouts-demo-stable`: ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-stable @@ -101,10 +101,13 @@ spec: http: paths: - path: / + pathType: Prefix backend: - # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field - serviceName: rollouts-demo-stable - servicePort: 80 + service: + # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field + name: rollouts-demo-stable + port: + number: 80 ``` Run the following commands to deploy: @@ -183,8 +186,8 @@ kubectl argo rollouts get rollout rollouts-demo ![Rollout Paused](../nginx/paused-rollout-nginx.png) At this point, both the canary and stable version of the Rollout are running, with 5% of the -traffic directed to the canary and 95% to the stable. When inspecting the TrafficSplit generated by -the controller, we see that the weight has been updated to reflect the current `setWeight: 5` step of +traffic directed to the canary and 95% to the stable. When inspecting the TrafficSplit generated by +the controller, we see that the weight has been updated to reflect the current `setWeight: 5` step of the canary deploy. 
```yaml diff --git a/docs/getting-started/mixed/ingress.yaml b/docs/getting-started/mixed/ingress.yaml index 1020c7bb64..25ee1a2ed7 100644 --- a/docs/getting-started/mixed/ingress.yaml +++ b/docs/getting-started/mixed/ingress.yaml @@ -1,4 +1,4 @@ -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-stable @@ -10,7 +10,10 @@ spec: http: paths: - path: / + pathType: Prefix backend: - # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field - serviceName: rollouts-demo-stable - servicePort: 80 + service: + # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field + name: rollouts-demo-stable + port: + number: 80 diff --git a/docs/getting-started/nginx/index.md b/docs/getting-started/nginx/index.md index aaa89302db..5875ee62bb 100644 --- a/docs/getting-started/nginx/index.md +++ b/docs/getting-started/nginx/index.md @@ -1,7 +1,7 @@ # Getting Started - NGINX Ingress This guide covers how Argo Rollouts integrates with the -[NGINX Ingress Controller](https://github.com/kubernetes/ingress-nginx) for traffic shaping. +[NGINX Ingress Controller](https://github.com/kubernetes/ingress-nginx) for traffic shaping. This guide builds upon the concepts of the [basic getting started guide](../../getting-started.md). 
## Requirements @@ -41,7 +41,7 @@ rule which has a backend targeting the Service referenced under `canary.stableSe In our example, that stable Service is named: `rollouts-demo-stable`: ```yaml -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-stable @@ -53,10 +53,13 @@ spec: http: paths: - path: / + pathType: Prefix backend: - # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field - serviceName: rollouts-demo-stable - servicePort: 80 + service: + # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field + name: rollouts-demo-stable + port: + number: 80 ``` Run the following commands to deploy: diff --git a/docs/getting-started/nginx/ingress.yaml b/docs/getting-started/nginx/ingress.yaml index 1020c7bb64..25ee1a2ed7 100644 --- a/docs/getting-started/nginx/ingress.yaml +++ b/docs/getting-started/nginx/ingress.yaml @@ -1,4 +1,4 @@ -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-stable @@ -10,7 +10,10 @@ spec: http: paths: - path: / + pathType: Prefix backend: - # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field - serviceName: rollouts-demo-stable - servicePort: 80 + service: + # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field + name: rollouts-demo-stable + port: + number: 80 diff --git a/docs/getting-started/smi/ingress.yaml b/docs/getting-started/smi/ingress.yaml index 52bbea5701..9a1eddb957 100644 --- a/docs/getting-started/smi/ingress.yaml +++ b/docs/getting-started/smi/ingress.yaml @@ -1,4 +1,4 @@ -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollouts-demo-stable @@ -16,7 +16,10 @@ spec: http: paths: - path: / + pathType: Prefix backend: - # Reference to a Service name, also specified 
in the Rollout spec.strategy.canary.stableService field - serviceName: rollouts-demo-stable - servicePort: 80 + service: + # Reference to a Service name, also specified in the Rollout spec.strategy.canary.stableService field + name: rollouts-demo-stable + port: + number: 80 diff --git a/docs/installation.md b/docs/installation.md index e144459f2e..7fae1d6d61 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -81,7 +81,21 @@ kubectl argo rollouts version ## Shell auto completion -The CLI can export shell completion code for several shells. +To enable auto completion for the plugin when used with `kubectl` (version 1.26 or newer), you need to create a shell script on your PATH called `kubectl_complete-argo-rollouts` which will provide the completions. + +```shell +cat <kubectl_complete-argo-rollouts +#!/usr/bin/env sh + +# Call the __complete command passing it all arguments +kubectl argo rollouts __complete "\$@" +EOF + +chmod +x kubectl_complete-argo-rollouts +sudo mv ./kubectl_complete-argo-rollouts /usr/local/bin/ +``` + +To enable auto completion for the CLI run as a standalone binary, the CLI can export shell completion code for several shells. For bash, ensure you have bash completions installed and enabled. To access completions in your current shell, run $ `source <(kubectl-argo-rollouts completion bash)`. Alternatively, write it to a file and source in `.bash_profile`. @@ -120,4 +134,3 @@ To upgrade Argo Rollouts: If deployments are happening while you upgrade the controller, then you shouldn't have any downtime. Current Rollouts will be paused and as soon as the new controller becomes active it will resume all in-flight deployments. - diff --git a/docs/migrating.md b/docs/migrating.md index 62e3494e3d..524b67444d 100644 --- a/docs/migrating.md +++ b/docs/migrating.md @@ -54,7 +54,12 @@ Instead of removing Deployment you can scale it down to zero and reference it fr 1. Create a Rollout resource. 1. 
Reference an existing Deployment using `workloadRef` field. -1. Scale-down an existing Deployment by changing `replicas` field of an existing Deployment to zero. +1. In the `workloadRef` field set the `scaleDown` attribute, which specifies how the Deployment should be scaled down. There are three options available: + * `never`: the Deployment is not scaled down + * `onsuccess`: the Deployment is scaled down after the Rollout becomes healthy + * `progressively`: as the Rollout is scaled up the Deployment is scaled down. + + Alternatively, manually scale down an existing Deployment by changing replicas field of an existing Deployment to zero. 1. To perform an update, the change should be made to the Pod template field of the Deployment. Below is an example of a Rollout resource referencing a Deployment. @@ -73,6 +78,7 @@ spec: apiVersion: apps/v1 kind: Deployment name: rollout-ref-deployment + scaleDown: onsuccess strategy: canary: steps: diff --git a/docs/plugins.md b/docs/plugins.md index 83048d4c2b..901bc79878 100644 --- a/docs/plugins.md +++ b/docs/plugins.md @@ -46,14 +46,20 @@ data: metricProviderPlugins: |- - name: "argoproj-labs/metrics" location: "file:///tmp/argo-rollouts/metric-plugin" + args: + - "--log-level" + - "debug" trafficRouterPlugins: |- - name: "argoproj-labs/nginx" location: "file:///tmp/argo-rollouts/traffic-plugin" + args: + - "--log-level" + - "debug" ``` As you can see there is a field called `name:` under both `metrics` or `trafficrouters` this is the first place where your -end users will need to configure the name of the plugin. The second location is either in the rollout object or the analysis -template which you can see the examples below. +end users will need to configure the name of the plugin. The second `location` is either in the rollout object or the analysis +template which you can see the examples below. The third `args` holds the command line arguments of the plugin. 
#### AnalysisTemplate Example ```yaml diff --git a/examples/notifications/kustomization.yaml b/examples/notifications/kustomization.yaml index df8b1ffb2f..73154065be 100644 --- a/examples/notifications/kustomization.yaml +++ b/examples/notifications/kustomization.yaml @@ -4,5 +4,5 @@ kind: Kustomization resources: - ../../manifests/notifications -patchesStrategicMerge: -- configmap.yaml \ No newline at end of file +patches: +- path: configmap.yaml diff --git a/experiments/analysisrun_test.go b/experiments/analysisrun_test.go index 9ed0834cfd..a50fdad861 100644 --- a/experiments/analysisrun_test.go +++ b/experiments/analysisrun_test.go @@ -734,3 +734,118 @@ func TestTerminateAnalysisRuns(t *testing.T) { patchedAr := f.getPatchedAnalysisRunAsObj(arPatchIdx) assert.True(t, patchedAr.Spec.Terminate) } + +// TestCreateAnalysisRunWithMetadataAndDryRun ensures we create the AnalysisRun with the appropriate labels, annotations, and dry-run options when provided in the experiment +func TestCreateAnalysisRunWithMetadataAndDryRun(t *testing.T) { + templates := generateTemplates("bar") + aTemplates := generateAnalysisTemplates("success-rate") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "success-rate", + TemplateName: aTemplates[0].Name, + }, + } + e.Status.Phase = v1alpha1.AnalysisPhaseRunning + e.Status.AvailableAt = now() + e.Spec.AnalysisRunMetadata = v1alpha1.AnalysisRunMetadata{ + Labels: map[string]string{ + "foo": "bar", + "foo2": "bar2", + }, + Annotations: map[string]string{ + "bar": "foo", + "bar2": "foo2", + }, + } + e.Spec.DryRun = []v1alpha1.DryRun{ + { + MetricName: "someMetric", + }, + { + MetricName: "someOtherMetric", + }, + } + rs := templateToRS(e, templates[0], 1) + ar := analysisTemplateToRun("success-rate", e, &aTemplates[0].Spec) + + f := newFixture(t, e, rs, &aTemplates[0]) + defer f.Close() + + analysisRunIdx := f.expectCreateAnalysisRunAction(ar) + patchIdx := 
f.expectPatchExperimentAction(e) + f.run(getKey(e, t)) + + patchedEx := f.getPatchedExperimentAsObj(patchIdx) + assert.Equal(t, v1alpha1.AnalysisPhasePending, patchedEx.Status.AnalysisRuns[0].Phase) + + analysisRun := f.getCreatedAnalysisRun(analysisRunIdx) + assert.Len(t, analysisRun.ObjectMeta.Labels, 2) + assert.Equal(t, analysisRun.ObjectMeta.Labels["foo"], "bar") + assert.Equal(t, analysisRun.ObjectMeta.Labels["foo2"], "bar2") + assert.Len(t, analysisRun.ObjectMeta.Annotations, 2) + assert.Equal(t, analysisRun.ObjectMeta.Annotations["bar"], "foo") + assert.Equal(t, analysisRun.ObjectMeta.Annotations["bar2"], "foo2") + + assert.Len(t, analysisRun.Spec.DryRun, 2) + assert.Equal(t, analysisRun.Spec.DryRun[0].MetricName, "someMetric") + assert.Equal(t, analysisRun.Spec.DryRun[1].MetricName, "someOtherMetric") +} + +// TestCreateAnalysisRunWithMetadataAndDryRunWithClusterScope tests the same thing as TestCreateAnalysisRunWithMetadataAndDryRun, with a cluster scope analysis template +func TestCreateAnalysisRunWithMetadataAndDryRunWithClusterScope(t *testing.T) { + templates := generateTemplates("bar") + aTemplates := generateClusterAnalysisTemplates("success-rate") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "success-rate", + TemplateName: aTemplates[0].Name, + ClusterScope: true, + }, + } + e.Status.Phase = v1alpha1.AnalysisPhaseRunning + e.Status.AvailableAt = now() + e.Spec.AnalysisRunMetadata = v1alpha1.AnalysisRunMetadata{ + Labels: map[string]string{ + "foo": "bar", + "foo2": "bar2", + }, + Annotations: map[string]string{ + "bar": "foo", + "bar2": "foo2", + }, + } + e.Spec.DryRun = []v1alpha1.DryRun{ + { + MetricName: "someMetric", + }, + { + MetricName: "someOtherMetric", + }, + } + rs := templateToRS(e, templates[0], 1) + ar := analysisTemplateToRun("success-rate", e, &aTemplates[0].Spec) + + f := newFixture(t, e, rs, &aTemplates[0]) + defer f.Close() + + analysisRunIdx := 
f.expectCreateAnalysisRunAction(ar) + patchIdx := f.expectPatchExperimentAction(e) + f.run(getKey(e, t)) + + patchedEx := f.getPatchedExperimentAsObj(patchIdx) + assert.Equal(t, v1alpha1.AnalysisPhasePending, patchedEx.Status.AnalysisRuns[0].Phase) + + analysisRun := f.getCreatedAnalysisRun(analysisRunIdx) + assert.Len(t, analysisRun.ObjectMeta.Labels, 2) + assert.Equal(t, analysisRun.ObjectMeta.Labels["foo"], "bar") + assert.Equal(t, analysisRun.ObjectMeta.Labels["foo2"], "bar2") + assert.Len(t, analysisRun.ObjectMeta.Annotations, 2) + assert.Equal(t, analysisRun.ObjectMeta.Annotations["bar"], "foo") + assert.Equal(t, analysisRun.ObjectMeta.Annotations["bar2"], "foo2") + + assert.Len(t, analysisRun.Spec.DryRun, 2) + assert.Equal(t, analysisRun.Spec.DryRun[0].MetricName, "someMetric") + assert.Equal(t, analysisRun.Spec.DryRun[1].MetricName, "someOtherMetric") +} diff --git a/experiments/controller.go b/experiments/controller.go index a42821aab0..3aa1519e46 100644 --- a/experiments/controller.go +++ b/experiments/controller.go @@ -63,8 +63,8 @@ type Controller struct { metricsServer *metrics.MetricsServer // used for unit testing - enqueueExperiment func(obj interface{}) - enqueueExperimentAfter func(obj interface{}, duration time.Duration) + enqueueExperiment func(obj any) + enqueueExperimentAfter func(obj any, duration time.Duration) // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. 
This @@ -127,10 +127,10 @@ func NewController(cfg ControllerConfig) *Controller { resyncPeriod: cfg.ResyncPeriod, } - controller.enqueueExperiment = func(obj interface{}) { + controller.enqueueExperiment = func(obj any) { controllerutil.Enqueue(obj, cfg.ExperimentWorkQueue) } - controller.enqueueExperimentAfter = func(obj interface{}, duration time.Duration) { + controller.enqueueExperimentAfter = func(obj any, duration time.Duration) { controllerutil.EnqueueAfter(obj, duration, cfg.ExperimentWorkQueue) } @@ -138,20 +138,20 @@ func NewController(cfg ControllerConfig) *Controller { // Set up an event handler for when experiment resources change cfg.ExperimentsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueExperiment, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { controller.enqueueExperiment(new) }, DeleteFunc: controller.enqueueExperiment, }) cfg.ExperimentsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - enqueueRollout := func(obj interface{}) { + AddFunc: func(obj any) { + enqueueRollout := func(obj any) { controllerutil.Enqueue(obj, cfg.RolloutWorkQueue) } controllerutil.EnqueueParentObject(obj, register.RolloutKind, enqueueRollout) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { oldAcc, err := meta.Accessor(old) if err != nil { return @@ -165,13 +165,13 @@ func NewController(cfg ControllerConfig) *Controller { // Two different versions of the same Replica will always have different RVs. 
return } - enqueueRollout := func(obj interface{}) { + enqueueRollout := func(obj any) { controllerutil.Enqueue(obj, cfg.RolloutWorkQueue) } controllerutil.EnqueueParentObject(new, register.RolloutKind, enqueueRollout) }, - DeleteFunc: func(obj interface{}) { - enqueueRollout := func(obj interface{}) { + DeleteFunc: func(obj any) { + enqueueRollout := func(obj any) { controllerutil.Enqueue(obj, cfg.RolloutWorkQueue) } controllerutil.EnqueueParentObject(obj, register.RolloutKind, enqueueRollout) @@ -184,10 +184,10 @@ func NewController(cfg ControllerConfig) *Controller { }) cfg.ReplicaSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controllerutil.EnqueueParentObject(obj, register.ExperimentKind, controller.enqueueExperiment) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { newRS := new.(*appsv1.ReplicaSet) oldRS := old.(*appsv1.ReplicaSet) if newRS.ResourceVersion == oldRS.ResourceVersion { @@ -204,19 +204,19 @@ func NewController(cfg ControllerConfig) *Controller { } controllerutil.EnqueueParentObject(new, register.ExperimentKind, controller.enqueueExperiment) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controllerutil.EnqueueParentObject(obj, register.ExperimentKind, controller.enqueueExperiment) }, }) cfg.AnalysisRunInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controller.enqueueIfCompleted(obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { + UpdateFunc: func(oldObj, newObj any) { controller.enqueueIfCompleted(newObj) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controller.enqueueIfCompleted(obj) }, }) @@ -346,7 +346,7 @@ func (ec *Controller) persistExperimentStatus(orig *v1alpha1.Experiment, newStat } // enqueueIfCompleted conditionally enqueues the AnalysisRun's Experiment if the run is complete -func 
(ec *Controller) enqueueIfCompleted(obj interface{}) { +func (ec *Controller) enqueueIfCompleted(obj any) { run := unstructuredutil.ObjectToAnalysisRun(obj) if run == nil { return diff --git a/experiments/controller_test.go b/experiments/controller_test.go index 9babd92d21..9365480cb9 100644 --- a/experiments/controller_test.go +++ b/experiments/controller_test.go @@ -116,13 +116,15 @@ func newFixture(t *testing.T, objects ...runtime.Object) *fixture { f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) f.enqueuedObjects = make(map[string]int) now := time.Now() - timeutil.Now = func() time.Time { + + timeutil.SetNowTimeFunc(func() time.Time { return now - } + }) f.unfreezeTime = func() error { - timeutil.Now = time.Now + timeutil.SetNowTimeFunc(time.Now) return nil } + return f } @@ -301,13 +303,13 @@ func generateRSName(ex *v1alpha1.Experiment, template v1alpha1.TemplateSpec) str return fmt.Sprintf("%s-%s", ex.Name, template.Name) } -func calculatePatch(ex *v1alpha1.Experiment, patch string, templates []v1alpha1.TemplateStatus, condition *v1alpha1.ExperimentCondition) string { - patchMap := make(map[string]interface{}) +func calculatePatch(ex *v1alpha1.Experiment, patch string, templates []v1alpha1.TemplateStatus, condition *v1alpha1.ExperimentCondition, analysisRuns []*v1alpha1.ExperimentAnalysisRunStatus, message string) string { + patchMap := make(map[string]any) err := json.Unmarshal([]byte(patch), &patchMap) if err != nil { panic(err) } - newStatus := patchMap["status"].(map[string]interface{}) + newStatus := patchMap["status"].(map[string]any) if templates != nil { newStatus["templateStatuses"] = templates patchMap["status"] = newStatus @@ -316,6 +318,14 @@ func calculatePatch(ex *v1alpha1.Experiment, patch string, templates []v1alpha1. 
newStatus["conditions"] = []v1alpha1.ExperimentCondition{*condition} patchMap["status"] = newStatus } + if analysisRuns != nil { + newStatus["analysisRuns"] = analysisRuns + patchMap["status"] = newStatus + } + if message != "" { + newStatus["message"] = message + patchMap["status"] = newStatus + } patchBytes, err := json.Marshal(patchMap) if err != nil { @@ -334,7 +344,7 @@ func calculatePatch(ex *v1alpha1.Experiment, patch string, templates []v1alpha1. newEx := &v1alpha1.Experiment{} json.Unmarshal(newBytes, newEx) - newPatch := make(map[string]interface{}) + newPatch := make(map[string]any) json.Unmarshal(patchBytes, &newPatch) newPatchBytes, _ := json.Marshal(newPatch) return string(newPatchBytes) @@ -380,7 +390,7 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share }) var enqueuedObjectsLock sync.Mutex - c.enqueueExperiment = func(obj interface{}) { + c.enqueueExperiment = func(obj any) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { @@ -396,7 +406,7 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share f.enqueuedObjects[key] = count c.experimentWorkqueue.Add(obj) } - c.enqueueExperimentAfter = func(obj interface{}, duration time.Duration) { + c.enqueueExperimentAfter = func(obj any, duration time.Duration) { c.enqueueExperiment(obj) } @@ -804,7 +814,7 @@ func TestAddInvalidSpec(t *testing.T) { expectedPatch := calculatePatch(e, `{ "status":{ } - }`, nil, cond) + }`, nil, cond, nil, "") assert.JSONEq(t, expectedPatch, patch) } @@ -851,7 +861,7 @@ func TestUpdateInvalidSpec(t *testing.T) { expectedPatch := calculatePatch(e, `{ "status":{ } - }`, nil, cond) + }`, nil, cond, nil, "") assert.JSONEq(t, expectedPatch, patch) } @@ -891,7 +901,7 @@ func TestRemoveInvalidSpec(t *testing.T) { expectedPatch := calculatePatch(e, `{ "status":{ } - }`, templateStatus, cond) + }`, templateStatus, cond, nil, "") assert.JSONEq(t, expectedPatch, patch) } diff --git 
a/experiments/experiment.go b/experiments/experiment.go index 7327c5a8b6..1b40464015 100644 --- a/experiments/experiment.go +++ b/experiments/experiment.go @@ -46,7 +46,7 @@ type experimentContext struct { replicaSetLister appslisters.ReplicaSetLister serviceLister v1.ServiceLister recorder record.EventRecorder - enqueueExperimentAfter func(obj interface{}, duration time.Duration) + enqueueExperimentAfter func(obj any, duration time.Duration) resyncPeriod time.Duration // calculated values during reconciliation @@ -70,7 +70,7 @@ func newExperimentContext( serviceLister v1.ServiceLister, recorder record.EventRecorder, resyncPeriod time.Duration, - enqueueExperimentAfter func(obj interface{}, duration time.Duration), + enqueueExperimentAfter func(obj any, duration time.Duration), ) *experimentContext { exCtx := experimentContext{ @@ -101,7 +101,7 @@ func (ec *experimentContext) reconcile() *v1alpha1.ExperimentStatus { } for _, analysis := range ec.ex.Spec.Analyses { - ec.reconcileAnalysisRun(analysis, ec.ex.Spec.DryRun, ec.ex.Spec.MeasurementRetention) + ec.reconcileAnalysisRun(analysis, ec.ex.Spec.DryRun, ec.ex.Spec.MeasurementRetention, &ec.ex.Spec.AnalysisRunMetadata) } newStatus := ec.calculateStatus() @@ -390,7 +390,7 @@ func calculateEnqueueDuration(ex *v1alpha1.Experiment, newStatus *v1alpha1.Exper // reconcileAnalysisRun reconciles a single analysis run, creating or terminating it as necessary. // Updates the analysis run statuses, which may subsequently fail the experiment. 
-func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) { +func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, analysisRunMetadata *v1alpha1.AnalysisRunMetadata) { logCtx := ec.log.WithField("analysis", analysis.Name) logCtx.Infof("Reconciling analysis") prevStatus := experimentutil.GetAnalysisRunStatus(ec.ex.Status, analysis.Name) @@ -446,7 +446,7 @@ func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAn logCtx.Warnf("Skipping AnalysisRun creation for analysis %s: experiment is terminating", analysis.Name) return } - run, err := ec.createAnalysisRun(analysis, dryRunMetrics, measurementRetentionMetrics) + run, err := ec.createAnalysisRun(analysis, dryRunMetrics, measurementRetentionMetrics, analysisRunMetadata) if err != nil { msg := fmt.Sprintf("Failed to create AnalysisRun for analysis '%s': %v", analysis.Name, err.Error()) newStatus.Phase = v1alpha1.AnalysisPhaseError @@ -493,13 +493,13 @@ func (ec *experimentContext) reconcileAnalysisRun(analysis v1alpha1.ExperimentAn // createAnalysisRun creates the analysis run. If an existing runs exists with same name, is // semantically equal, and is not complete, returns the existing one, otherwise creates a new // run with a collision counter increase. 
-func (ec *experimentContext) createAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) (*v1alpha1.AnalysisRun, error) { +func (ec *experimentContext) createAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, analysisRunMetadata *v1alpha1.AnalysisRunMetadata) (*v1alpha1.AnalysisRun, error) { analysisRunIf := ec.argoProjClientset.ArgoprojV1alpha1().AnalysisRuns(ec.ex.Namespace) args, err := ec.ResolveAnalysisRunArgs(analysis.Args) if err != nil { return nil, err } - run, err := ec.newAnalysisRun(analysis, args, dryRunMetrics, measurementRetentionMetrics) + run, err := ec.newAnalysisRun(analysis, args, dryRunMetrics, measurementRetentionMetrics, analysisRunMetadata) if err != nil { return nil, err } @@ -635,47 +635,158 @@ func (ec *experimentContext) assessAnalysisRuns() (v1alpha1.AnalysisPhase, strin } // newAnalysisRun generates an AnalysisRun from the experiment and template -func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention) (*v1alpha1.AnalysisRun, error) { +func (ec *experimentContext) newAnalysisRun(analysis v1alpha1.ExperimentAnalysisTemplateRef, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, analysisRunMetadata *v1alpha1.AnalysisRunMetadata) (*v1alpha1.AnalysisRun, error) { if analysis.ClusterScope { - clusterTemplate, err := ec.clusterAnalysisTemplateLister.Get(analysis.TemplateName) + analysisTemplates, clusterAnalysisTemplates, err := ec.getAnalysisTemplatesFromClusterAnalysis(analysis) if err != nil { return nil, err } name := fmt.Sprintf("%s-%s", ec.ex.Name, analysis.Name) - clusterAnalysisTemplates := 
[]*v1alpha1.ClusterAnalysisTemplate{clusterTemplate} - run, err := analysisutil.NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, dryRunMetrics, measurementRetentionMetrics, name, "", ec.ex.Namespace) - if err != nil { - return nil, err - } + runLabels := map[string]string{} + runAnnotations := map[string]string{} + instanceID := analysisutil.GetInstanceID(ec.ex) if instanceID != "" { - run.Labels = map[string]string{v1alpha1.LabelKeyControllerInstanceID: ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID]} + runLabels[v1alpha1.LabelKeyControllerInstanceID] = ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID] + } + if analysisRunMetadata != nil { + for k, v := range analysisRunMetadata.Labels { + runLabels[k] = v + } + for k, v := range analysisRunMetadata.Annotations { + runAnnotations[k] = v + } + } + run, err := analysisutil.NewAnalysisRunFromTemplates(analysisTemplates, clusterAnalysisTemplates, args, dryRunMetrics, measurementRetentionMetrics, runLabels, runAnnotations, name, "", ec.ex.Namespace) + if err != nil { + return nil, err } + run.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ec.ex, controllerKind)} return run, nil } else { - template, err := ec.analysisTemplateLister.AnalysisTemplates(ec.ex.Namespace).Get(analysis.TemplateName) + analysisTemplates, clusterAnalysisTemplates, err := ec.getAnalysisTemplatesFromAnalysis(analysis) if err != nil { return nil, err } name := fmt.Sprintf("%s-%s", ec.ex.Name, analysis.Name) - analysisTemplates := []*v1alpha1.AnalysisTemplate{template} - run, err := analysisutil.NewAnalysisRunFromTemplates(analysisTemplates, nil, args, dryRunMetrics, measurementRetentionMetrics, name, "", ec.ex.Namespace) - if err != nil { - return nil, err - } + runLabels := map[string]string{} + runAnnotations := map[string]string{} instanceID := analysisutil.GetInstanceID(ec.ex) if instanceID != "" { - run.Labels = map[string]string{v1alpha1.LabelKeyControllerInstanceID: 
ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID]} + runLabels[v1alpha1.LabelKeyControllerInstanceID] = ec.ex.Labels[v1alpha1.LabelKeyControllerInstanceID] + } + if analysisRunMetadata != nil { + for k, v := range analysisRunMetadata.Labels { + runLabels[k] = v + } + for k, v := range analysisRunMetadata.Annotations { + runAnnotations[k] = v + } + } + + run, err := analysisutil.NewAnalysisRunFromTemplates(analysisTemplates, clusterAnalysisTemplates, args, dryRunMetrics, measurementRetentionMetrics, runLabels, runAnnotations, name, "", ec.ex.Namespace) + if err != nil { + return nil, err } + run.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ec.ex, controllerKind)} return run, nil } } +func (ec *experimentContext) getAnalysisTemplatesFromClusterAnalysis(analysis v1alpha1.ExperimentAnalysisTemplateRef) ([]*v1alpha1.AnalysisTemplate, []*v1alpha1.ClusterAnalysisTemplate, error) { + clusterTemplate, err := ec.clusterAnalysisTemplateLister.Get(analysis.TemplateName) + if err != nil { + return nil, nil, err + } + templates := make([]*v1alpha1.AnalysisTemplate, 0) + clusterTemplates := make([]*v1alpha1.ClusterAnalysisTemplate, 0) + clusterTemplates = append(clusterTemplates, clusterTemplate) + + if clusterTemplate.Spec.Templates != nil { + innerTemplates, innerClusterTemplates, innerErr := ec.getAnalysisTemplatesFromRefs(&clusterTemplate.Spec.Templates) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) 
+ } + uniqueTemplates, uniqueClusterTemplates := analysisutil.FilterUniqueTemplates(templates, clusterTemplates) + return uniqueTemplates, uniqueClusterTemplates, nil +} + +func (ec *experimentContext) getAnalysisTemplatesFromAnalysis(analysis v1alpha1.ExperimentAnalysisTemplateRef) ([]*v1alpha1.AnalysisTemplate, []*v1alpha1.ClusterAnalysisTemplate, error) { + template, err := ec.analysisTemplateLister.AnalysisTemplates(ec.ex.Namespace).Get(analysis.TemplateName) + if err != nil { + return nil, nil, err + } + templates := make([]*v1alpha1.AnalysisTemplate, 0) + clusterTemplates := make([]*v1alpha1.ClusterAnalysisTemplate, 0) + templates = append(templates, template) + + if template.Spec.Templates != nil { + innerTemplates, innerClusterTemplates, innerErr := ec.getAnalysisTemplatesFromRefs(&template.Spec.Templates) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) 
+ } + uniqueTemplates, uniqueClusterTemplates := analysisutil.FilterUniqueTemplates(templates, clusterTemplates) + return uniqueTemplates, uniqueClusterTemplates, nil +} + +func (ec *experimentContext) getAnalysisTemplatesFromRefs(templateRefs *[]v1alpha1.AnalysisTemplateRef) ([]*v1alpha1.AnalysisTemplate, []*v1alpha1.ClusterAnalysisTemplate, error) { + templates := make([]*v1alpha1.AnalysisTemplate, 0) + clusterTemplates := make([]*v1alpha1.ClusterAnalysisTemplate, 0) + for _, templateRef := range *templateRefs { + if templateRef.ClusterScope { + template, err := ec.clusterAnalysisTemplateLister.Get(templateRef.TemplateName) + if err != nil { + if k8serrors.IsNotFound(err) { + ec.log.Warnf("ClusterAnalysisTemplate '%s' not found", templateRef.TemplateName) + } + return nil, nil, err + } + clusterTemplates = append(clusterTemplates, template) + // Look for nested templates + if template.Spec.Templates != nil { + innerTemplates, innerClusterTemplates, innerErr := ec.getAnalysisTemplatesFromRefs(&template.Spec.Templates) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) + } + } else { + template, err := ec.analysisTemplateLister.AnalysisTemplates(ec.ex.Namespace).Get(templateRef.TemplateName) + if err != nil { + if k8serrors.IsNotFound(err) { + ec.log.Warnf("AnalysisTemplate '%s' not found", templateRef.TemplateName) + } + return nil, nil, err + } + templates = append(templates, template) + // Look for nested templates + if template.Spec.Templates != nil { + innerTemplates, innerClusterTemplates, innerErr := ec.getAnalysisTemplatesFromRefs(&template.Spec.Templates) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) 
+ } + } + + } + uniqueTemplates, uniqueClusterTemplates := analysisutil.FilterUniqueTemplates(templates, clusterTemplates) + return uniqueTemplates, uniqueClusterTemplates, nil +} + // verifyAnalysisTemplate verifies an AnalysisTemplate. For now, it simply means that it exists func (ec *experimentContext) verifyAnalysisTemplate(analysis v1alpha1.ExperimentAnalysisTemplateRef) error { _, err := ec.analysisTemplateLister.AnalysisTemplates(ec.ex.Namespace).Get(analysis.TemplateName) diff --git a/experiments/experiment_test.go b/experiments/experiment_test.go index ae8df335af..a21e8146d8 100644 --- a/experiments/experiment_test.go +++ b/experiments/experiment_test.go @@ -62,7 +62,7 @@ func newTestContext(ex *v1alpha1.Experiment, objects ...runtime.Object) *experim serviceLister, record.NewFakeEventRecorder(), noResyncPeriodFunc(), - func(obj interface{}, duration time.Duration) {}, + func(obj any, duration time.Duration) {}, ) } @@ -97,7 +97,7 @@ func TestSetExperimentToPending(t *testing.T) { "status":{ "phase": "Pending" } - }`, templateStatus, cond) + }`, templateStatus, cond, nil, "") assert.Equal(t, expectedPatch, patch) } @@ -281,7 +281,7 @@ func TestSuccessAfterDurationPasses(t *testing.T) { "status":{ "phase": "Successful" } - }`, templateStatuses, cond) + }`, templateStatuses, cond, nil, "") assert.JSONEq(t, expectedPatch, patch) } @@ -302,7 +302,7 @@ func TestDontRequeueWithoutDuration(t *testing.T) { fakeClient := exCtx.kubeclientset.(*k8sfake.Clientset) fakeClient.Tracker().Add(rs1) enqueueCalled := false - exCtx.enqueueExperimentAfter = func(obj interface{}, duration time.Duration) { + exCtx.enqueueExperimentAfter = func(obj any, duration time.Duration) { enqueueCalled = true } newStatus := exCtx.reconcile() @@ -325,7 +325,7 @@ func TestRequeueAfterDuration(t *testing.T) { "bar": rs1, } enqueueCalled := false - exCtx.enqueueExperimentAfter = func(obj interface{}, duration time.Duration) { + exCtx.enqueueExperimentAfter = func(obj any, duration 
time.Duration) { enqueueCalled = true // ensures we are enqueued around ~20 seconds twentySeconds := time.Second * time.Duration(20) @@ -352,7 +352,7 @@ func TestRequeueAfterProgressDeadlineSeconds(t *testing.T) { "bar": rs1, } enqueueCalled := false - exCtx.enqueueExperimentAfter = func(obj interface{}, duration time.Duration) { + exCtx.enqueueExperimentAfter = func(obj any, duration time.Duration) { enqueueCalled = true // ensures we are enqueued around 10 minutes tenMinutes := time.Second * time.Duration(600) @@ -529,6 +529,7 @@ func TestServiceInheritPortsFromRS(t *testing.T) { assert.NotNil(t, exCtx.templateServices["bar"]) assert.Equal(t, exCtx.templateServices["bar"].Name, "foo-bar") assert.Equal(t, exCtx.templateServices["bar"].Spec.Ports[0].Port, int32(80)) + assert.Equal(t, exCtx.templateServices["bar"].Spec.Ports[0].Name, "testport") } func TestServiceNameSet(t *testing.T) { @@ -554,3 +555,420 @@ func TestServiceNameSet(t *testing.T) { assert.NotNil(t, exCtx.templateServices["bar"]) assert.Equal(t, exCtx.templateServices["bar"].Name, "service-name") } + +func TestCreatenalysisRunWithClusterTemplatesAndTemplateAndInnerTemplates(t *testing.T) { + + at := analysisTemplateWithNamespacedAnalysisRefs("bar", "bar2") + at2 := analysisTemplateWithClusterAnalysisRefs("bar2", "clusterbar", "clusterbar2") + cat := clusterAnalysisTemplateWithAnalysisRefs("clusterbar", "clusterbar2", "clusterbar3") + cat2 := clusterAnalysisTemplate("clusterbar2") + cat3 := clusterAnalysisTemplate("clusterbar3") + cat4 := clusterAnalysisTemplate("clusterbar4") + + templates := generateTemplates("bar") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "exp-bar", + TemplateName: "bar", + ClusterScope: false, + }, + { + Name: "exp-bar-2", + TemplateName: "clusterbar4", + ClusterScope: true, + }, + } + + e.Status = v1alpha1.ExperimentStatus{} + e.Status.AvailableAt = now() + e.Status.Phase = v1alpha1.AnalysisPhaseRunning + 
+ cond := newCondition(conditions.ReplicaSetUpdatedReason, e) + + rs := templateToRS(e, templates[0], 0) + f := newFixture(t, e, rs, cat, cat2, cat3, cat4, at, at2) + defer f.Close() + + ar1 := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-exp-bar", + Namespace: metav1.NamespaceDefault, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rs, controllerKind)}, + }, + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: concatMultipleSlices([][]v1alpha1.Metric{at.Spec.Metrics, at2.Spec.Metrics, cat.Spec.Metrics, cat2.Spec.Metrics, cat3.Spec.Metrics}), + DryRun: concatMultipleSlices([][]v1alpha1.DryRun{at.Spec.DryRun, at2.Spec.DryRun, cat.Spec.DryRun, cat2.Spec.DryRun, cat3.Spec.DryRun}), + Args: at.Spec.Args, + MeasurementRetention: concatMultipleSlices([][]v1alpha1.MeasurementRetention{at.Spec.MeasurementRetention, at2.Spec.MeasurementRetention, cat.Spec.MeasurementRetention, cat2.Spec.MeasurementRetention, cat3.Spec.MeasurementRetention}), + }, + } + ar2 := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-exp-bar-2", + Namespace: metav1.NamespaceDefault, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rs, controllerKind)}, + }, + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: cat4.Spec.Metrics, + Args: cat4.Spec.Args, + DryRun: cat4.Spec.DryRun, + MeasurementRetention: cat4.Spec.MeasurementRetention, + }, + } + createdIndex1 := f.expectCreateAnalysisRunAction(ar1) + createdIndex2 := f.expectCreateAnalysisRunAction(ar2) + index := f.expectPatchExperimentAction(e) + + f.run(getKey(e, t)) + + createdAr1 := f.getCreatedAnalysisRun(createdIndex1) + createdAr2 := f.getCreatedAnalysisRun(createdIndex2) + + patch := f.getPatchedExperiment(index) + templateStatus := []v1alpha1.TemplateStatus{ + generateTemplatesStatus("bar", 0, 0, v1alpha1.TemplateStatusProgressing, nil), + } + analysisRun := []*v1alpha1.ExperimentAnalysisRunStatus{ + { + AnalysisRun: "foo-exp-bar", + Name: "exp-bar", + Phase: "Pending", + }, 
+ { + AnalysisRun: "foo-exp-bar-2", + Name: "exp-bar-2", + Phase: "Pending", + }, + } + expectedPatch := calculatePatch(e, `{ + "status":{ + "phase": "Pending" + } + }`, templateStatus, cond, analysisRun, "") + assert.Equal(t, expectedPatch, patch) + + assert.Equal(t, "foo-exp-bar", createdAr1.Name) + assert.Len(t, createdAr1.Spec.Metrics, 5) + assert.Equal(t, "foo-exp-bar-2", createdAr2.Name) + assert.Len(t, createdAr2.Spec.Metrics, 1) +} + +func TestCreatenalysisRunWithTemplatesAndNoMetricsAtRoot(t *testing.T) { + + at := analysisTemplateWithOnlyNamespacedAnalysisRefs("bar", "bar2") + at2 := analysisTemplateWithClusterAnalysisRefs("bar2", "clusterbar", "clusterbar2") + cat := clusterAnalysisTemplateWithAnalysisRefs("clusterbar", "clusterbar2", "clusterbar3") + cat2 := clusterAnalysisTemplate("clusterbar2") + cat3 := clusterAnalysisTemplate("clusterbar3") + cat4 := clusterAnalysisTemplate("clusterbar4") + + templates := generateTemplates("bar") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "exp-bar", + TemplateName: "bar", + ClusterScope: false, + }, + { + Name: "exp-bar-2", + TemplateName: "clusterbar4", + ClusterScope: true, + }, + } + + e.Status = v1alpha1.ExperimentStatus{} + e.Status.AvailableAt = now() + e.Status.Phase = v1alpha1.AnalysisPhaseRunning + + cond := newCondition(conditions.ReplicaSetUpdatedReason, e) + + rs := templateToRS(e, templates[0], 0) + f := newFixture(t, e, rs, cat, cat2, cat3, cat4, at, at2) + defer f.Close() + + ar1 := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-exp-bar", + Namespace: metav1.NamespaceDefault, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rs, controllerKind)}, + }, + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: concatMultipleSlices([][]v1alpha1.Metric{at2.Spec.Metrics, cat.Spec.Metrics, cat2.Spec.Metrics, cat3.Spec.Metrics}), + DryRun: concatMultipleSlices([][]v1alpha1.DryRun{at2.Spec.DryRun, 
cat.Spec.DryRun, cat2.Spec.DryRun, cat3.Spec.DryRun}), + Args: at.Spec.Args, + MeasurementRetention: concatMultipleSlices([][]v1alpha1.MeasurementRetention{at2.Spec.MeasurementRetention, cat.Spec.MeasurementRetention, cat2.Spec.MeasurementRetention, cat3.Spec.MeasurementRetention}), + }, + } + ar2 := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-exp-bar-2", + Namespace: metav1.NamespaceDefault, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rs, controllerKind)}, + }, + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: cat4.Spec.Metrics, + Args: cat4.Spec.Args, + DryRun: cat4.Spec.DryRun, + MeasurementRetention: cat4.Spec.MeasurementRetention, + }, + } + createdIndex1 := f.expectCreateAnalysisRunAction(ar1) + createdIndex2 := f.expectCreateAnalysisRunAction(ar2) + index := f.expectPatchExperimentAction(e) + + f.run(getKey(e, t)) + + createdAr1 := f.getCreatedAnalysisRun(createdIndex1) + createdAr2 := f.getCreatedAnalysisRun(createdIndex2) + + patch := f.getPatchedExperiment(index) + templateStatus := []v1alpha1.TemplateStatus{ + generateTemplatesStatus("bar", 0, 0, v1alpha1.TemplateStatusProgressing, nil), + } + analysisRun := []*v1alpha1.ExperimentAnalysisRunStatus{ + { + AnalysisRun: "foo-exp-bar", + Name: "exp-bar", + Phase: "Pending", + }, + { + AnalysisRun: "foo-exp-bar-2", + Name: "exp-bar-2", + Phase: "Pending", + }, + } + expectedPatch := calculatePatch(e, `{ + "status":{ + "phase": "Pending" + } + }`, templateStatus, cond, analysisRun, "") + assert.Equal(t, expectedPatch, patch) + + assert.Equal(t, "foo-exp-bar", createdAr1.Name) + assert.Len(t, createdAr1.Spec.Metrics, 4) + assert.Equal(t, "foo-exp-bar-2", createdAr2.Name) + assert.Len(t, createdAr2.Spec.Metrics, 1) +} + +func TestAnalysisTemplateNotFoundShouldFailTheExperiment(t *testing.T) { + + templates := generateTemplates("bar") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "exp-bar", + 
TemplateName: "bar", + ClusterScope: false, + }, + } + + rs := templateToRS(e, templates[0], 0) + + expectFailureWithMessage(e, templates, t, "Failed to create AnalysisRun for analysis 'exp-bar': analysistemplate.argoproj.io \"bar\" not found", e, rs) +} + +func TestClusterAnalysisTemplateNotFoundShouldFailTheExperiment(t *testing.T) { + + templates := generateTemplates("bar") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "exp-bar", + TemplateName: "cluster-bar", + ClusterScope: true, + }, + } + + rs := templateToRS(e, templates[0], 0) + + expectFailureWithMessage(e, templates, t, "Failed to create AnalysisRun for analysis 'exp-bar': clusteranalysistemplate.argoproj.io \"cluster-bar\" not found", e, rs) +} + +func TestInnerAnalysisTemplateNotFoundShouldFailTheExperiment(t *testing.T) { + + at := analysisTemplateWithOnlyNamespacedAnalysisRefs("bar", "bar2") + + templates := generateTemplates("bar") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "exp-bar", + TemplateName: "bar", + ClusterScope: false, + }, + } + + rs := templateToRS(e, templates[0], 0) + + expectFailureWithMessage(e, templates, t, "Failed to create AnalysisRun for analysis 'exp-bar': analysistemplate.argoproj.io \"bar2\" not found", at, e, rs) +} + +func TestInnerClusterAnalysisTemplateNotFoundShouldFailTheExperiment(t *testing.T) { + + cat := clusterAnalysisTemplateWithAnalysisRefs("clusterbar", "clusterbar2", "clusterbar3") + cat2 := clusterAnalysisTemplate("clusterbar2") + + templates := generateTemplates("bar") + e := newExperiment("foo", templates, "") + e.Spec.Analyses = []v1alpha1.ExperimentAnalysisTemplateRef{ + { + Name: "exp-bar", + TemplateName: "clusterbar", + ClusterScope: true, + }, + } + rs := templateToRS(e, templates[0], 0) + + expectFailureWithMessage(e, templates, t, "Failed to create AnalysisRun for analysis 'exp-bar': 
clusteranalysistemplate.argoproj.io \"clusterbar3\" not found", cat, cat2, e, rs) +} + +func expectFailureWithMessage(e *v1alpha1.Experiment, templates []v1alpha1.TemplateSpec, t *testing.T, message string, objects ...runtime.Object) { + + e.Status = v1alpha1.ExperimentStatus{} + e.Status.AvailableAt = now() + e.Status.Phase = v1alpha1.AnalysisPhaseRunning + + cond := newCondition(conditions.ReplicaSetUpdatedReason, e) + + f := newFixture(t, objects...) + defer f.Close() + + index := f.expectPatchExperimentAction(e) + + f.run(getKey(e, t)) + + patch := f.getPatchedExperiment(index) + templateStatus := []v1alpha1.TemplateStatus{ + generateTemplatesStatus("bar", 0, 0, v1alpha1.TemplateStatusProgressing, nil), + } + analysisRun := []*v1alpha1.ExperimentAnalysisRunStatus{ + { + AnalysisRun: "", + Name: "exp-bar", + Message: message, + Phase: "Error", + }, + } + expectedPatch := calculatePatch(e, `{ + "status":{ + "phase": "Error" + } + }`, templateStatus, cond, analysisRun, message) + assert.Equal(t, expectedPatch, patch) +} + +func concatMultipleSlices[T any](slices [][]T) []T { + var totalLen int + + for _, s := range slices { + totalLen += len(s) + } + + result := make([]T, totalLen) + + var i int + + for _, s := range slices { + i += copy(result[i:], s) + } + + return result +} + +func analysisTemplateWithNamespacedAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + return analysisTemplateWithAnalysisRefs(name, false, innerRefsName...) +} + +func analysisTemplateWithClusterAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + return analysisTemplateWithAnalysisRefs(name, true, innerRefsName...) 
+} + +func analysisTemplateWithAnalysisRefs(name string, clusterScope bool, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + templatesRefs := []v1alpha1.AnalysisTemplateRef{} + for _, innerTplName := range innerRefsName { + templatesRefs = append(templatesRefs, v1alpha1.AnalysisTemplateRef{ + TemplateName: innerTplName, + ClusterScope: clusterScope, + }) + } + return &v1alpha1.AnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{{ + Name: "example-" + name, + }}, + DryRun: []v1alpha1.DryRun{{ + MetricName: "example-" + name, + }}, + MeasurementRetention: []v1alpha1.MeasurementRetention{{ + MetricName: "example-" + name, + }}, + Templates: templatesRefs, + }, + } +} + +func analysisTemplateWithOnlyNamespacedAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + return analysisTemplateWithOnlyRefs(name, false, innerRefsName...) +} + +func analysisTemplateWithOnlyRefs(name string, clusterScope bool, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + templatesRefs := []v1alpha1.AnalysisTemplateRef{} + for _, innerTplName := range innerRefsName { + templatesRefs = append(templatesRefs, v1alpha1.AnalysisTemplateRef{ + TemplateName: innerTplName, + ClusterScope: clusterScope, + }) + } + return &v1alpha1.AnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{}, + DryRun: []v1alpha1.DryRun{}, + MeasurementRetention: []v1alpha1.MeasurementRetention{}, + Templates: templatesRefs, + }, + } +} + +func clusterAnalysisTemplate(name string) *v1alpha1.ClusterAnalysisTemplate { + return &v1alpha1.ClusterAnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{{ + Name: "clusterexample-" + name, + }}, + }, + } +} + +func 
clusterAnalysisTemplateWithAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.ClusterAnalysisTemplate { + templatesRefs := []v1alpha1.AnalysisTemplateRef{} + for _, innerTplName := range innerRefsName { + templatesRefs = append(templatesRefs, v1alpha1.AnalysisTemplateRef{ + TemplateName: innerTplName, + ClusterScope: true, + }) + } + return &v1alpha1.ClusterAnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{{ + Name: "clusterexample-" + name, + }}, + Templates: templatesRefs, + }, + } +} diff --git a/experiments/replicaset_test.go b/experiments/replicaset_test.go index 030414f2df..a3d3e9e0a7 100644 --- a/experiments/replicaset_test.go +++ b/experiments/replicaset_test.go @@ -41,7 +41,7 @@ func TestCreateMultipleRS(t *testing.T) { expectedPatch := calculatePatch(e, `{ "status":{ } - }`, templateStatus, cond) + }`, templateStatus, cond, nil, "") assert.JSONEq(t, expectedPatch, patch) } @@ -72,7 +72,7 @@ func TestCreateMissingRS(t *testing.T) { generateTemplatesStatus("bar", 0, 0, v1alpha1.TemplateStatusProgressing, now()), generateTemplatesStatus("baz", 0, 0, v1alpha1.TemplateStatusProgressing, now()), } - assert.JSONEq(t, calculatePatch(e, expectedPatch, templateStatuses, cond), patch) + assert.JSONEq(t, calculatePatch(e, expectedPatch, templateStatuses, cond, nil, ""), patch) } func TestTemplateHasMultipleRS(t *testing.T) { diff --git a/go.mod b/go.mod index 2858c544a0..422327627b 100644 --- a/go.mod +++ b/go.mod @@ -1,76 +1,73 @@ module github.com/argoproj/argo-rollouts -go 1.20 +go 1.21 + +toolchain go1.21.8 require ( - github.com/antonmedv/expr v1.15.3 - github.com/argoproj/notifications-engine v0.4.1-0.20231011160156-2d2d1a75dbee + github.com/antonmedv/expr v1.15.5 + github.com/argoproj/notifications-engine v0.4.1-0.20240219110818-7a069766e954 github.com/argoproj/pkg v0.13.6 - github.com/aws/aws-sdk-go-v2 v1.21.2 - github.com/aws/aws-sdk-go-v2/config v1.19.1 - 
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.2 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.0 + github.com/aws/aws-sdk-go-v2 v1.26.1 + github.com/aws/aws-sdk-go-v2/config v1.27.10 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.37.0 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.30.5 + github.com/aws/smithy-go v1.20.2 github.com/blang/semver v3.5.1+incompatible - github.com/bombsimon/logrusr/v4 v4.0.0 - github.com/evanphx/json-patch/v5 v5.6.0 + github.com/bombsimon/logrusr/v4 v4.1.0 + github.com/evanphx/json-patch/v5 v5.9.0 github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/go-plugin v1.4.10 - github.com/influxdata/influxdb-client-go/v2 v2.12.3 + github.com/hashicorp/go-plugin v1.6.0 + github.com/influxdata/influxdb-client-go/v2 v2.13.0 github.com/juju/ansiterm v1.0.0 github.com/machinebox/graphql v0.2.2 github.com/mitchellh/mapstructure v1.5.0 github.com/newrelic/newrelic-client-go v1.1.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.42.0 + github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_model v0.6.0 + github.com/prometheus/common v0.47.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/servicemeshinterface/smi-sdk-go v0.5.0 github.com/sirupsen/logrus v1.9.3 github.com/soheilhy/cmux v0.1.5 github.com/spaceapegames/go-wavefront v1.8.1 - github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.8.4 + github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.9.0 github.com/tj/assert v0.0.3 github.com/valyala/fasttemplate v1.2.2 - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 - google.golang.org/grpc v1.57.0 - google.golang.org/protobuf v1.31.0 + golang.org/x/oauth2 v0.18.0 + 
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 + google.golang.org/grpc v1.62.1 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.25.8 - k8s.io/apiextensions-apiserver v0.25.8 - k8s.io/apimachinery v0.25.8 - k8s.io/apiserver v0.25.8 - k8s.io/cli-runtime v0.25.8 - k8s.io/client-go v0.25.8 - k8s.io/code-generator v0.25.8 - k8s.io/component-base v0.25.8 - k8s.io/klog/v2 v2.80.1 - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 - k8s.io/kubectl v0.25.8 - k8s.io/kubernetes v1.25.8 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.29.3 + k8s.io/apiextensions-apiserver v0.29.3 + k8s.io/apimachinery v0.29.3 + k8s.io/apiserver v0.29.3 + k8s.io/cli-runtime v0.29.3 + k8s.io/client-go v0.29.3 + k8s.io/code-generator v0.29.3 + k8s.io/component-base v0.29.3 + k8s.io/klog/v2 v2.110.1 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/kubectl v0.29.3 + k8s.io/kubernetes v1.29.3 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/yaml v1.4.0 ) require ( - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.27 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/PagerDuty/go-pagerduty v1.7.0 // indirect github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 // indirect github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 // indirect github.com/google/go-github/v41 v41.0.0 // indirect github.com/matryer/is v1.4.0 // indirect - github.com/russross/blackfriday v1.6.0 // indirect ) require ( @@ -79,60 +76,71 @@ require ( 
github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/aws/aws-sdk-go v1.44.116 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect - github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect - github.com/aws/smithy-go v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.10 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso 
v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.3 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/deepmap/oapi-codegen v1.11.0 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.7.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-errors/errors v1.4.2 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.21.1 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect - 
github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/cel-go v0.17.7 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-github/v53 v53.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.4 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregdel/pushover v1.2.1 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v0.14.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.1 // indirect - github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/hashicorp/yamux v0.1.1 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -144,91 +152,117 @@ require ( github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lunixbochs/vtclean v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + 
github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/oapi-codegen/runtime v1.0.0 // indirect github.com/oklog/run v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.13 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/slack-go/slack v0.12.2 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fastjson v1.6.3 // indirect github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 // indirect - github.com/xlab/treeprint v1.1.0 // indirect + 
github.com/xlab/treeprint v1.2.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.opencensus.io v0.24.0 // indirect - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.12.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.6.0 // indirect + golang.org/x/tools v0.16.1 // indirect gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect gomodules.xyz/notify v0.1.1 // indirect - google.golang.org/api v0.132.0 // indirect - 
google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/api v0.149.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/cloud-provider v0.0.0 // indirect k8s.io/cluster-bootstrap v0.25.8 // indirect - k8s.io/component-helpers v0.25.8 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + k8s.io/component-helpers v0.29.3 // indirect + k8s.io/controller-manager v0.29.3 // indirect + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect + k8s.io/kms v0.29.3 // indirect + k8s.io/kubelet v0.0.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace ( github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 - k8s.io/api v0.0.0 => k8s.io/api v0.25.8 - k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.25.8 - 
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.25.8 - k8s.io/apiserver v0.0.0 => k8s.io/apiserver v0.25.8 - k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.25.8 - k8s.io/client-go v0.0.0 => k8s.io/client-go v0.25.8 - k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.25.8 - k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.25.8 - k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.25.8 - k8s.io/component-base v0.0.0 => k8s.io/component-base v0.25.8 - k8s.io/component-helpers v0.0.0 => k8s.io/component-helpers v0.25.8 - k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.25.8 - k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.25.8 - k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.25.8 - k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.25.8 - k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.25.8 - k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.25.8 - k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.25.8 - k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.25.8 - k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.25.8 - k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.25.8 - k8s.io/metrics v0.0.0 => k8s.io/metrics v0.25.8 - k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.25.8 - k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.25.8 - k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.25.8 + github.com/go-telegram-bot-api/telegram-bot-api/v5 => github.com/OvyFlash/telegram-bot-api/v5 v5.0.0-20240108230938-63e5c59035bf + k8s.io/api v0.0.0 => k8s.io/api v0.29.3 + k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.29.3 + k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.29.3 + k8s.io/apiserver v0.0.0 => k8s.io/apiserver v0.29.3 + k8s.io/cli-runtime v0.0.0 => k8s.io/cli-runtime v0.29.3 + k8s.io/client-go v0.0.0 => k8s.io/client-go v0.29.3 + k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider 
v0.29.3 + k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.29.3 + k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.29.3 + k8s.io/component-base v0.0.0 => k8s.io/component-base v0.29.3 + k8s.io/component-helpers v0.0.0 => k8s.io/component-helpers v0.29.3 + k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.29.3 + k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.29.3 + k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.29.3 + k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.29.3 + k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.29.3 + k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.29.3 + k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.29.3 + k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.3 + k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.29.3 + k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.29.3 + k8s.io/metrics v0.0.0 => k8s.io/metrics v0.29.3 + k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.29.3 + k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.29.3 + k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.29.3 ) diff --git a/go.sum b/go.sum index 674e220433..3aff874a74 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= 
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= @@ -38,29 +38,13 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= -github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= -github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= 
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= @@ -74,7 +58,10 @@ github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYr github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= 
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OvyFlash/telegram-bot-api/v5 v5.0.0-20240108230938-63e5c59035bf h1:a7VKhbjKYPO8twGy/1AxMpM2Fp0qT7bf25fmCVMVu4s= +github.com/OvyFlash/telegram-bot-api/v5 v5.0.0-20240108230938-63e5c59035bf/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= github.com/PagerDuty/go-pagerduty v1.7.0 h1:S1NcMKECxT5hJwV4VT+QzeSsSiv4oWl1s2821dUqG/8= github.com/PagerDuty/go-pagerduty v1.7.0/go.mod h1:PuFyJKRz1liIAH4h5KVXVD18Obpp1ZXRdxHvmGXooro= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= @@ -83,6 +70,7 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214 h1:MdZskg1II+YVe+9ss935i8+paqqf4KEuYcTYUWSwABI= github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20220708192748-b73dcb041214/go.mod h1:rjP7sIipbZcagro/6TCk6X0ZeFT2eyudH5+fve/cbBA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -91,57 +79,60 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 
-github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= -github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/antonmedv/expr v1.15.5 h1:y0Iz3cEwmpRz5/r3w4qQR0MfIqJGdGM1zbhD/v0G5Vg= +github.com/antonmedv/expr v1.15.5/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE= -github.com/argoproj/notifications-engine v0.4.1-0.20231011160156-2d2d1a75dbee h1:ZYILioq4v6OIsr7uh0Pcx7JY4KpJ9qs8qbjRqM6HWMY= -github.com/argoproj/notifications-engine v0.4.1-0.20231011160156-2d2d1a75dbee/go.mod h1:VG9FXG0ddIVGc7NcSTRapaUjCPCYqOji//z6mmBYwCE= +github.com/argoproj/notifications-engine v0.4.1-0.20240219110818-7a069766e954 h1:4jbSTsw6/9pulz2eVoLnKtn75FYIeaLCNBOA1LjG1fA= +github.com/argoproj/notifications-engine v0.4.1-0.20240219110818-7a069766e954/go.mod h1:E4gOYnn452S8c10UucTztrZx/cTGU+jgMZiqfH9HUck= github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM= github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.39/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.44.116 h1:NpLIhcvLWXJZAEwvPj3TDHeqp7DleK6ZUVYyW01WNHY= github.com/aws/aws-sdk-go v1.44.116/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.20.1/go.mod h1:NU06lETsFm8fUC6ZjhgDpVBcGZTFQ6XM+LZWZxMI4ac= -github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= -github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2/config v1.19.1 h1:oe3vqcGftyk40icfLymhhhNysAwk0NfiwkDi2GTPMXs= -github.com/aws/aws-sdk-go-v2/config v1.19.1/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= -github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.38/go.mod h1:qggunOChCMu9ZF/UkAfhTz25+U2rLVb3ya0Ua6TTfCA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= 
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.32/go.mod h1:0ZXSqrty4FtQ7p8TEuRde/SZm9X05KT18LAUlR40Ln0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.2 h1:HbEoy5QzXicnGgGWF4moCgsbio2xytgVQcs70xD3j3w= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.2/go.mod h1:Fc5ZJyxghsjGp1KqbLb2HTJjsJjSv6AXUikHUJYmCHM= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.0 h1:lSCNS+ZMztgQWoLz/I27HdYjKlUaKEMWApM0dVOR/y8= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.21.0/go.mod h1:AZv/T0/2rhNBLiY2k109TT6HJ7Z0P8Z+SYvs0jqVkXE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= -github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0 h1:tQoMg8i4nFAB70cJ4wiAYEiZRYo2P6uDmU2D6ys/igo= -github.com/aws/aws-sdk-go-v2/service/sqs v1.20.0/go.mod h1:jQhN5f4p3PALMNlUtfb/0wGIFlV7vGtJlPDVfxfNfPY= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= -github.com/aws/aws-sdk-go-v2/service/sts 
v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.14.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= -github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2/config v1.27.10 h1:PS+65jThT0T/snC5WjyfHHyUgG+eBoupSDV+f838cro= +github.com/aws/aws-sdk-go-v2/config v1.27.10/go.mod h1:BePM7Vo4OBpHreKRUMuDXX+/+JWP38FLkzl5m27/Jjs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10 h1:qDZ3EA2lv1KangvQB6y258OssCHD0xvaGiEDkG4X/10= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10/go.mod h1:6t3sucOaYDwDssHQa0ojH1RpmVmF5/jArkye1b2FKMI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.37.0 h1:sGGUnU/pUSzjrcCvQgN2pEc3aTQILyK2rRsWVY5CSt0= 
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.37.0/go.mod h1:U12sr6Lt14X96f16t+rR52+2BdqtydwN7DjEEHRMjO0= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.30.5 h1:/x2u/TOx+n17U+gz98TOw1HKJom0EOqrhL4SjrHr0cQ= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.30.5/go.mod h1:e1McVqsud0JOERidvppLEHnuCdh/X6MRyL5L0LseAUk= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= +github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7 h1:tRNrFDGRm81e6nTX5Q4CFblea99eAfm0dxXazGpLceU= +github.com/aws/aws-sdk-go-v2/service/sqs v1.29.7/go.mod h1:8GWUDux5Z2h6z2efAtr54RdHXtLm8sq7Rg85ZNY/CZM= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 h1:WzFol5Cd+yDxPAdnzTA5LmpHYSWinhmSj4rQChV0ee8= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= 
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -150,17 +141,20 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bombsimon/logrusr/v4 v4.0.0 h1:Pm0InGphX0wMhPqC02t31onlq9OVyJ98eP/Vh63t1Oo= -github.com/bombsimon/logrusr/v4 v4.0.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bombsimon/logrusr/v4 v4.1.0 h1:uZNPbwusB0eUXlO8hIUwStE6Lr5bLN6IgYgG+75kuh4= +github.com/bombsimon/logrusr/v4 v4.1.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 h1:yaYcGQ7yEIGbsJfW/9z7v1sLiZg/5rSNNXwmMct5XaE= github.com/bradleyfalzon/ghinstallation/v2 v2.5.0/go.mod h1:amcvPQMrRkWNdueWOjPytGL25xQGzox7425qMgzo+Vo= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwmarrin/discordgo v0.19.0/go.mod h1:O9S4p+ofTFwB02em7jkpkV8M3R0/PUVOwN61zSZ0r4Q= github.com/cenkalti/backoff 
v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -174,54 +168,50 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod 
h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= 
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/deepmap/oapi-codegen v1.11.0 h1:f/X2NdIkaBKsSdpeuwLnY/vDI0AtPUrmB5LMgc7YD+A= -github.com/deepmap/oapi-codegen v1.11.0/go.mod h1:k+ujhoQGxmQYBZBbxhOZNZf4j08qv5mC+OH+fFTnKxM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20220417044921-416226498f94 h1:VIy7cdK7ufs7ctpTFkXJHm1uP3dJSnCGSPysEICB1so= +github.com/elazarl/goproxy v0.0.0-20220417044921-416226498f94/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 
-github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod 
h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= @@ -231,18 +221,16 @@ github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/getkin/kin-openapi v0.94.0/go.mod h1:LWZfzOd7PRy8GJ1dJ6mCU6tNdSfOwRac1BUPam4aw6Q= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= 
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= -github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -257,53 +245,47 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= 
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-playground/assert/v2 
v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc= -github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -337,15 +319,16 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -360,8 +343,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg= github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg= github.com/google/go-github/v53 v53.0.0 h1:T1RyHbSnpHYnoF0ZYKiIPSgPtuJ8G6vgc0MKodXsQDQ= @@ -382,17 +366,19 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid 
v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -404,7 +390,6 @@ github.com/gopackage/ddp v0.0.0-20170117053602-652027933df4 h1:4EZlYQIiyecYJlUbV github.com/gopackage/ddp v0.0.0-20170117053602-652027933df4/go.mod h1:lEO7XoHJ/xNRBCxrn4h/CEB67h0kW1B0t4ooP2yrjUA= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -414,8 +399,14 @@ github.com/gregdel/pushover v1.2.1/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER 
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -423,15 +414,15 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= -github.com/hashicorp/go-plugin v1.4.10/go.mod 
h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-retryablehttp v0.5.1/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= @@ -444,25 +435,27 @@ github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/influxdb-client-go/v2 v2.12.3 h1:28nRlNMRIV4QbtIUvxhWqaxn0IpXeMSkY/uJa/O/vC4= -github.com/influxdata/influxdb-client-go/v2 v2.12.3/go.mod h1:IrrLUbCjjfkmRuaCiGQg4m2GbkaeJDcuWoxiWdQEbA0= 
+github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM= +github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4= github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU= github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/jaytaylor/html2text v0.0.0-20190408195923-01ec452cbe43/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -472,6 +465,7 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ansiterm v1.0.0 h1:gmMvnZRq7JZJx6jkfSq9/+2LMrVEwGwt7UR6G+lmDEg= github.com/juju/ansiterm v1.0.0/go.mod h1:PyXUpnI3olx3bsPcHt98FGPX/KCFZ1Fi+hw1XLI6384= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -486,25 +480,13 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod 
h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks= -github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= -github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= -github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= -github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= -github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= -github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= -github.com/lestrrat-go/jwx v1.2.24/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= @@ -518,27 +500,24 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.10/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= 
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v7 v7.0.29/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= @@ -557,8 +536,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -575,12 +554,14 @@ github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/newrelic/newrelic-client-go v1.1.0 h1:aflNjzQ21c+2GwBVh+UbAf9lznkRfCcVABoc5UM4IXw= github.com/newrelic/newrelic-client-go v1.1.0/go.mod h1:RYMXt7hgYw7nzuXIGd2BH0F1AivgWw7WrBhNBQZEB4k= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nlopes/slack v0.5.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= +github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -593,21 +574,22 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= 
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.13 h1:nV98dkBpqaYbDnhefmOQ+Rn4hE+jD6AtjYHXaU5WyJI= github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.13/go.mod h1:4OjcxgwdXzezqytxN534MooNmrxRD50geWZxTD7845s= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -618,39 +600,38 @@ github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.42.0 
h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/servicemeshinterface/smi-sdk-go v0.5.0 h1:9cZdhvGbGDlmnp9qqmcQL+RL6KZ3IzHfDLoA5Axg8n0= github.com/servicemeshinterface/smi-sdk-go v0.5.0/go.mod h1:nm1Slf3pfaZPP3g2tE/K5wDmQ1uWVSP0p3uu5rQAQLc= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= @@ -672,27 +653,29 @@ github.com/sony/sonyflake v1.0.0 h1:MpU6Ro7tfXwgn2l5eluf9xQvQJDROTBImNCfRXn/YeM= github.com/sony/sonyflake v1.0.0/go.mod h1:Jv3cfhf/UFtolOTTRd3q4Nl6ENqM+KfyZ5PseKfZGF4= github.com/spaceapegames/go-wavefront v1.8.1 h1:Xuby0uBfw1WVxD9d+l8Gh+zINqnBfd0RJT8e/3i3vBM= github.com/spaceapegames/go-wavefront v1.8.1/go.mod h1:GtdIjtJ0URkfPmaKx0+7vMSDvT/MON9v+4pbdagA8As= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= 
-github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -703,37 +686,48 @@ 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= github.com/valyala/fastjson 
v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 h1:qqllXPzXh+So+mmANlX/gCJrgo+1kQyshMoQ+NASzm0= github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0/go.mod h1:2rx5KE5FLD0HRfkkpyn8JwbVLBdhgeiOb2D2D9LLKM4= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -741,9 +735,37 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= 
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= 
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -758,15 +780,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -777,6 +795,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e 
h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -798,10 +818,10 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -843,18 +863,14 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -862,8 +878,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -877,7 +893,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -898,7 +915,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -934,39 +950,35 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text 
v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= @@ -974,13 +986,11 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -998,12 +1008,12 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1033,15 +1043,14 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 h1:juzzlx91nWAOsHuOVfXZPMXHtJEKouZvY9bBbwlOeYs= gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45/go.mod h1:41y72mzHT7+jFNgyBpJRrZWuZJcLmLrTpq6iGgOFJMQ= gomodules.xyz/notify v0.1.1 h1:1tTuoyswmPvzqPCTEDQK8SZ3ukCxLsonAAwst2+y1a0= @@ -1063,16 +1072,17 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api 
v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= -google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1103,13 +1113,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1124,11 +1133,8 @@ google.golang.org/grpc v1.30.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1141,17 +1147,15 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1161,6 +1165,8 @@ gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkp gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1173,13 +1179,10 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1189,69 +1192,79 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.8/go.mod h1:N++Llhs8kCixMUoCaXXAyMMPbo8dDVnh+IQ36xZV2/0= k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= -k8s.io/api v0.25.8 h1:pcbnWkCcmjNhp6OEKqR+ojO0CJydpOOw7WiWedjLJAU= -k8s.io/api v0.25.8/go.mod h1:FaJqAtI13XOERtpLOQTkW3SiSf0lqsUohYqaxCyHI18= -k8s.io/apiextensions-apiserver v0.25.8 h1:PBji7zCXwYoEabNcNOfvb3asd5LIwZKh1mowrbwn010= -k8s.io/apiextensions-apiserver v0.25.8/go.mod 
h1:3wN73ddXCwLTE1exhoBiWp5G3u6xRfoNt0cKTHZ5KGE= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= +k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= k8s.io/apimachinery v0.17.8/go.mod h1:Lg8zZ5iC/O8UjCqW6DNhcQG2m4TdjF9kwG3891OWbbA= k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= -k8s.io/apimachinery v0.25.8 h1:c4kI9xm0U5nid8sBpBvM+2VHlv4Af8KnbhZIodZF/54= -k8s.io/apimachinery v0.25.8/go.mod h1:ZTl0drTQaFi5gMM3snYI5tWV1XJmRH1gfnDx2QCLsxk= -k8s.io/apiserver v0.25.8 h1:ZTYdLdouAu8D6h9QavMaQZiAV+EfWK87VGdOyb6RZMQ= -k8s.io/apiserver v0.25.8/go.mod h1:IJ1r0vqXxwa+3QbrxAHWqdmoGZnVDDMzWtIK9ju3maI= -k8s.io/cli-runtime v0.25.8 h1:3+I4zgdcY0KoCAWgqfQEMkhKOK35ailULxeTMcrBAfs= -k8s.io/cli-runtime v0.25.8/go.mod h1:Kbi+0tb9s/Gtsp3HfMJ/P20K3MYeC4t/CMDaV4pZiJQ= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= +k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= k8s.io/client-go v0.17.8/go.mod h1:SJsDS64AAtt9VZyeaQMb4Ck5etCitZ/FwajWdzua5eY= k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= -k8s.io/client-go v0.25.8 h1:PruqsI6qccbowI5wjeNosyE1BiKViChRWVOvCZtYnXY= -k8s.io/client-go v0.25.8/go.mod h1:Wiu5CQCaOqWugLrdvl04HK90P0QMc4oxQ3BXoJGjD+A= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/cloud-provider v0.29.3 
h1:y39hNq0lrPD1qmqQ2ykwMJGeWF9LsepVkR2a4wskwLc= +k8s.io/cloud-provider v0.29.3/go.mod h1:daDV1WkAO6pTrdsn7v8TpN/q9n75ExUC4RJDl7vlPKk= k8s.io/cluster-bootstrap v0.25.8 h1:2JoXlDAnki1rmYMdrExP5tYXJgJhCERYHtAbucjZgs8= k8s.io/cluster-bootstrap v0.25.8/go.mod h1:O7q/A8Os259t1Tm2S9Zn9XipZ9eej0AfApj1htCT0Lc= k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/code-generator v0.25.8 h1:rhj7PQgiTdDiV2D9Ep0wHRppQ/jrG7DDJ5vPpEtRtck= -k8s.io/code-generator v0.25.8/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI= -k8s.io/component-base v0.25.8 h1:lQ5Ouw7lupdpXn5slRjAeHnlMK/aAEbPf9jjSWbOD3c= -k8s.io/component-base v0.25.8/go.mod h1:MkC9Lz4fXoGOgB2WhFBU4zjiviIEeJS3sVhTxX9vt6s= -k8s.io/component-helpers v0.25.8 h1:jTi68SNaCis1s4//S7CXOtmzIOqhiY5CUroZuD2+OEg= -k8s.io/component-helpers v0.25.8/go.mod h1:+EZENL02v1dJXJvAYXZfAldATLviWL7Y/K3Pw8LB3MU= +k8s.io/code-generator v0.29.3 h1:m7E25/t9R9NvejspO2zBdyu+/Gl0Z5m7dCRc680KS14= +k8s.io/code-generator v0.29.3/go.mod h1:x47ofBhN4gxYFcxeKA1PYXeaPreAGaDN85Y/lNUsPoM= +k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= +k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-helpers v0.29.3 h1:1dqZswuZgT2ZMixYeORyCUOAApXxgsvjVSgfoUT+P4o= +k8s.io/component-helpers v0.29.3/go.mod h1:yiDqbRQrnQY+sPju/bL7EkwDJb6LVOots53uZNMZBos= +k8s.io/controller-manager v0.29.3 h1:pvm3mirypgW7kM6dHRk6O5ANZj4bZTWirfk5gO6RlCo= +k8s.io/controller-manager v0.29.3/go.mod h1:RNxpf0d1WAo59sOLd32isWJP0oZ7Zxr+q4VEEaSq4gk= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo 
v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kms v0.29.3 h1:ReljsAUhYlm2spdT4yXmY+9a8x8dc/OT4mXvwQPPteQ= +k8s.io/kms v0.29.3/go.mod h1:TBGbJKpRUMk59neTMDMddjIDL+D4HuFUbpuiuzmOPg0= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubectl v0.25.8 h1:i6nlpU5LQyg9J19mKihJo1oE7FE7+Zg1cF4TOpJQmEQ= -k8s.io/kubectl v0.25.8/go.mod h1:IPmVLfTvFIZKl0vwyl0LkegIbk2jsnaVmkpDgDymCPI= -k8s.io/kubernetes v1.25.8 h1:RQ3Rf3aScxhg/xDT1GebWFHOtYodM83Q/Yxvgku39G4= -k8s.io/kubernetes v1.25.8/go.mod h1:mEIT8S9Ir6R4R8N6VLmfxcNFAmGU2hEtV780TuPYlug= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= 
+k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= +k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= +k8s.io/kubelet v0.29.3 h1:X9h0ZHzc+eUeNTaksbN0ItHyvGhQ7Z0HPjnQD2oHdwU= +k8s.io/kubelet v0.29.3/go.mod h1:jDiGuTkFOUynyBKzOoC1xRSWlgAZ9UPcTYeFyjr6vas= +k8s.io/kubernetes v1.29.3 h1:EuOAKN4zpiP+kBx/0e9yS5iBkPSyLml19juOqZxBtDw= +k8s.io/kubernetes v1.29.3/go.mod h1:CP+Z+S9haxyB7J+nV6ywYry4dqlphArPXjcc0CsBVXc= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= 
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git 
a/hack/custom-boilerplate.go.txt b/hack/custom-boilerplate.go.txt index e4cd20e380..2b7dbd2b46 100644 --- a/hack/custom-boilerplate.go.txt +++ b/hack/custom-boilerplate.go.txt @@ -1,6 +1,4 @@ /* -Copyright YEAR The Kubernetes sample-controller Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/hack/gen-crd-spec/main.go b/hack/gen-crd-spec/main.go index 97156308a5..60c1bde9d7 100644 --- a/hack/gen-crd-spec/main.go +++ b/hack/gen-crd-spec/main.go @@ -31,7 +31,7 @@ const metadataValidation = `properties: type: object type: object` -var preserveUnknownFields = map[string]interface{}{ +var preserveUnknownFields = map[string]any{ "x-kubernetes-preserve-unknown-fields": true, } @@ -43,7 +43,7 @@ var crdPaths = map[string]string{ "AnalysisRun": "manifests/crds/analysis-run-crd.yaml", } -func setValidationOverride(un *unstructured.Unstructured, fieldOverride map[string]interface{}, path string) { +func setValidationOverride(un *unstructured.Unstructured, fieldOverride map[string]any, path string) { // Prepare variables preSchemaPath := []string{"spec", "versions"} objVersions, _, _ := unstructured.NestedSlice(un.Object, preSchemaPath...) @@ -59,11 +59,11 @@ func setValidationOverride(un *unstructured.Unstructured, fieldOverride map[stri } // Loop over version's slice - var finalOverride []interface{} + var finalOverride []any for _, v := range objVersions { - unstructured.SetNestedMap(v.(map[string]interface{}), fieldOverride, schemaPath...) + unstructured.SetNestedMap(v.(map[string]any), fieldOverride, schemaPath...) - _, ok, err := unstructured.NestedFieldNoCopy(v.(map[string]interface{}), schemaPath...) + _, ok, err := unstructured.NestedFieldNoCopy(v.(map[string]any), schemaPath...) 
checkErr(err) if !ok { panic(fmt.Sprintf("%s not found for kind %s", schemaPath, crdKind(un))) @@ -96,11 +96,12 @@ func NewCustomResourceDefinition() []*extensionsobj.CustomResourceDefinition { // clean up stuff left by controller-gen deleteFile("config/webhook/manifests.yaml") deleteFile("config/webhook") - deleteFile("config/argoproj.io_analysisruns.yaml") - deleteFile("config/argoproj.io_analysistemplates.yaml") - deleteFile("config/argoproj.io_clusteranalysistemplates.yaml") - deleteFile("config/argoproj.io_experiments.yaml") - deleteFile("config/argoproj.io_rollouts.yaml") + deleteFile("config/crd/argoproj.io_analysisruns.yaml") + deleteFile("config/crd/argoproj.io_analysistemplates.yaml") + deleteFile("config/crd/argoproj.io_clusteranalysistemplates.yaml") + deleteFile("config/crd/argoproj.io_experiments.yaml") + deleteFile("config/crd/argoproj.io_rollouts.yaml") + deleteFile("config/crd") deleteFile("config") crds := []*extensionsobj.CustomResourceDefinition{} @@ -153,7 +154,7 @@ func createMetadataValidation(un *unstructured.Unstructured) { switch kind { case "Rollout": - var roValidated []interface{} + var roValidated []any roPath := []string{ "template", "properties", @@ -161,12 +162,12 @@ func createMetadataValidation(un *unstructured.Unstructured) { } roPath = append(path, roPath...) for _, v := range objVersions { - unstructured.SetNestedMap(v.(map[string]interface{}), metadataValidationObj.Object, roPath...) + unstructured.SetNestedMap(v.(map[string]any), metadataValidationObj.Object, roPath...) roValidated = append(roValidated, v) } unstructured.SetNestedSlice(un.Object, roValidated, prePath...) case "Experiment": - var exValidated []interface{} + var exValidated []any exPath := []string{ "templates", "items", @@ -177,12 +178,12 @@ func createMetadataValidation(un *unstructured.Unstructured) { } exPath = append(path, exPath...) 
for _, v := range objVersions { - unstructured.SetNestedMap(v.(map[string]interface{}), metadataValidationObj.Object, exPath...) + unstructured.SetNestedMap(v.(map[string]any), metadataValidationObj.Object, exPath...) exValidated = append(exValidated, v) } unstructured.SetNestedSlice(un.Object, exValidated, prePath...) case "ClusterAnalysisTemplate", "AnalysisTemplate", "AnalysisRun": - var analysisValidated []interface{} + var analysisValidated []any analysisPath := []string{ "metrics", "items", @@ -196,12 +197,12 @@ func createMetadataValidation(un *unstructured.Unstructured) { analysisPathJobMetadata := append(analysisPath, "metadata") for _, v := range objVersions { - unstructured.SetNestedMap(v.(map[string]interface{}), metadataValidationObj.Object, analysisPathJobMetadata...) + unstructured.SetNestedMap(v.(map[string]any), metadataValidationObj.Object, analysisPathJobMetadata...) analysisValidated = append(analysisValidated, v) } unstructured.SetNestedSlice(un.Object, analysisValidated, prePath...) - var analysisJobValidated []interface{} + var analysisJobValidated []any analysisPathJobTemplateMetadata := []string{ "spec", "properties", @@ -211,7 +212,7 @@ func createMetadataValidation(un *unstructured.Unstructured) { } analysisPathJobTemplateMetadata = append(analysisPath, analysisPathJobTemplateMetadata...) for _, v := range objVersions { - unstructured.SetNestedMap(v.(map[string]interface{}), metadataValidationObj.Object, analysisPathJobTemplateMetadata...) + unstructured.SetNestedMap(v.(map[string]any), metadataValidationObj.Object, analysisPathJobTemplateMetadata...) analysisJobValidated = append(analysisJobValidated, v) } unstructured.SetNestedSlice(un.Object, analysisJobValidated, prePath...) 
@@ -322,11 +323,12 @@ var patchAnnotationKeys = map[string]bool{ "x-kubernetes-patch-strategy": true, "x-kubernetes-list-map-keys": true, "x-kubernetes-list-type": true, + "x-kubernetes-map-type": true, } // injectPatchAnnotations injects patch annotations from given schema definitions and drop properties that don't have // patch annotations injected -func injectPatchAnnotations(prop map[string]interface{}, propSchema spec.Schema, schemaDefinitions spec.Definitions) (bool, error) { +func injectPatchAnnotations(prop map[string]any, propSchema spec.Schema, schemaDefinitions spec.Definitions) (bool, error) { injected := false for k, v := range propSchema.Extensions { if patchAnnotationKeys[k] { @@ -349,13 +351,13 @@ func injectPatchAnnotations(prop map[string]interface{}, propSchema spec.Schema, propSchemas = schema.Properties } - childProps, ok := prop["properties"].(map[string]interface{}) + childProps, ok := prop["properties"].(map[string]any) if !ok { - childProps = map[string]interface{}{} + childProps = map[string]any{} } for k, v := range childProps { - childInjected, err := injectPatchAnnotations(v.(map[string]interface{}), propSchemas[k], schemaDefinitions) + childInjected, err := injectPatchAnnotations(v.(map[string]any), propSchemas[k], schemaDefinitions) if err != nil { return false, err } @@ -390,7 +392,7 @@ func generateKustomizeSchema(crds []*extensionsobj.CustomResourceDefinition, out schemaDefinitions[normalizeRef(k)] = v.Schema } - definitions := map[string]interface{}{} + definitions := map[string]any{} for _, crd := range crds { var version string var props map[string]extensionsobj.JSONSchemaProps @@ -406,7 +408,7 @@ func generateKustomizeSchema(crds []*extensionsobj.CustomResourceDefinition, out if err != nil { return err } - propsMap := map[string]interface{}{} + propsMap := map[string]any{} err = json.Unmarshal(data, &propsMap) if err != nil { return err @@ -414,7 +416,7 @@ func generateKustomizeSchema(crds 
[]*extensionsobj.CustomResourceDefinition, out crdSchema := schemaDefinitions[normalizeRef(fmt.Sprintf("%s/%s.%s", rolloutsDefinitionsPrefix, version, crd.Spec.Names.Kind))] for k, p := range propsMap { - injected, err := injectPatchAnnotations(p.(map[string]interface{}), crdSchema.Properties[k], schemaDefinitions) + injected, err := injectPatchAnnotations(p.(map[string]any), crdSchema.Properties[k], schemaDefinitions) if err != nil { return err } @@ -426,7 +428,7 @@ func generateKustomizeSchema(crds []*extensionsobj.CustomResourceDefinition, out } definitionName := kubeopenapiutil.ToRESTFriendlyName(fmt.Sprintf("%s/%s.%s", crd.Spec.Group, version, crd.Spec.Names.Kind)) - definitions[definitionName] = map[string]interface{}{ + definitions[definitionName] = map[string]any{ "properties": propsMap, "x-kubernetes-group-version-kind": []map[string]string{{ "group": crd.Spec.Group, @@ -435,7 +437,7 @@ func generateKustomizeSchema(crds []*extensionsobj.CustomResourceDefinition, out }}, } } - data, err := json.MarshalIndent(map[string]interface{}{ + data, err := json.MarshalIndent(map[string]any{ "definitions": definitions, }, "", " ") if err != nil { diff --git a/hack/gen-docs/main.go b/hack/gen-docs/main.go index 4e29e355be..8ad71587e8 100644 --- a/hack/gen-docs/main.go +++ b/hack/gen-docs/main.go @@ -36,6 +36,12 @@ func generateNotificationsDocs() { if e := updateMkDocsNav("Notifications", "Services", files); e != nil { log.Fatal(e) } + if e := strReplaceDocFiles("argocd-notifications-cm", "argo-rollouts-notification-configmap", files); e != nil { + log.Fatal(e) + } + if e := strReplaceDocFiles("argocd-notifications-secret", "argo-rollouts-notification-secret", files); e != nil { + log.Fatal(e) + } } } @@ -73,15 +79,15 @@ func updateMkDocsNav(parent string, child string, files []string) error { if e := yaml.Unmarshal(data, &un.Object); e != nil { return e } - nav := un.Object["nav"].([]interface{}) + nav := un.Object["nav"].([]any) navitem, _ := findNavItem(nav, parent) 
if navitem == nil { return fmt.Errorf("Can't find '%s' nav item in mkdoc.yml", parent) } - navitemmap := navitem.(map[interface{}]interface{}) - subnav := navitemmap[parent].([]interface{}) + navitemmap := navitem.(map[any]any) + subnav := navitemmap[parent].([]any) subnav = removeNavItem(subnav, child) - commands := make(map[string]interface{}) + commands := make(map[string]any) commands[child] = files navitemmap[parent] = append(subnav, commands) @@ -92,9 +98,9 @@ func updateMkDocsNav(parent string, child string, files []string) error { return os.WriteFile("mkdocs.yml", newmkdocs, 0644) } -func findNavItem(nav []interface{}, key string) (interface{}, int) { +func findNavItem(nav []any, key string) (any, int) { for i, item := range nav { - o, ismap := item.(map[interface{}]interface{}) + o, ismap := item.(map[any]any) if ismap { if _, ok := o[key]; ok { return o, i @@ -104,7 +110,7 @@ func findNavItem(nav []interface{}, key string) (interface{}, int) { return nil, -1 } -func removeNavItem(nav []interface{}, key string) []interface{} { +func removeNavItem(nav []any, key string) []any { _, i := findNavItem(nav, key) if i != -1 { nav = append(nav[:i], nav[i+1:]...) 
@@ -264,6 +270,22 @@ func commandName(cmd string) string { return strings.Replace(cmd, "kubectl-argo-", "", 1) } +// strReplaceDocFiles replaces old string with new string in list of document files +func strReplaceDocFiles(old string, new string, files []string) error { + baseDir := "./docs/" + for _, file := range files { + data, err := os.ReadFile(baseDir + file) + if err != nil { + return err + } + newdata := strings.ReplaceAll(string(data), old, new) + if err := os.WriteFile(baseDir+file, []byte(newdata), 0644); err != nil { + return err + } + } + return nil +} + type byName []*cobra.Command func (s byName) Len() int { return len(s) } diff --git a/hack/installers/install-codegen-go-tools.sh b/hack/installers/install-codegen-go-tools.sh index d6c7d5b864..9630fd615b 100755 --- a/hack/installers/install-codegen-go-tools.sh +++ b/hack/installers/install-codegen-go-tools.sh @@ -45,7 +45,7 @@ go_mod_install k8s.io/code-generator/cmd/lister-gen go_mod_install k8s.io/kube-openapi/cmd/openapi-gen # controller-gen is run by ./hack/gen-crd-spec to generate the CRDs -go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.12.1 +go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0 # swagger cli is used to generate swagger docs go install github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 diff --git a/hack/swagger-codegen.sh b/hack/swagger-codegen.sh index 8b5dc9c3d2..9ba2cf6634 100755 --- a/hack/swagger-codegen.sh +++ b/hack/swagger-codegen.sh @@ -3,7 +3,7 @@ export SWAGGER_CODEGEN_VERSION=3.0.25 PROJECT_ROOT=$(cd $(dirname ${BASH_SOURCE})/..; pwd) -test -f "/tmp/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar" || \ - curl https://repo1.maven.org/maven2/io/swagger/codegen/v3/swagger-codegen-cli/${SWAGGER_CODEGEN_VERSION}/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar -o "/tmp/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar" +test -f "$PROJECT_ROOT/dist/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar" || \ + curl 
https://repo1.maven.org/maven2/io/swagger/codegen/v3/swagger-codegen-cli/${SWAGGER_CODEGEN_VERSION}/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar -o "$PROJECT_ROOT/dist/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar" -docker run --rm -v /tmp:/tmp -v $PROJECT_ROOT:/src -w /src/ui -t maven:3-jdk-8 java -jar /tmp/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar $@ \ No newline at end of file +docker run --rm -v $PROJECT_ROOT:/src -w /src/ui -t maven:3-jdk-8 java -jar /src/dist/swagger-codegen-cli-${SWAGGER_CODEGEN_VERSION}.jar $@ \ No newline at end of file diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 6b1294143f..088e99875d 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -20,7 +20,7 @@ cleanup() { trap "cleanup" EXIT SIGINT -chmod +x ${CODEGEN_PKG}/generate-groups.sh +chmod +x ${CODEGEN_PKG}/*.sh ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ github.com/argoproj/argo-rollouts/pkg/client github.com/argoproj/argo-rollouts/pkg/apis \ diff --git a/ingress/ingress.go b/ingress/ingress.go index 6c4059e476..8af5327891 100644 --- a/ingress/ingress.go +++ b/ingress/ingress.go @@ -51,7 +51,7 @@ type Controller struct { ingressWorkqueue workqueue.RateLimitingInterface metricServer *metrics.MetricsServer - enqueueRollout func(obj interface{}) + enqueueRollout func(obj any) albClasses []string nginxClasses []string } @@ -76,7 +76,7 @@ func NewController(cfg ControllerConfig) *Controller { } util.CheckErr(cfg.RolloutsInformer.Informer().AddIndexers(cache.Indexers{ - ingressIndexName: func(obj interface{}) ([]string, error) { + ingressIndexName: func(obj any) ([]string, error) { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil { return ingressutil.GetRolloutIngressKeys(ro), nil } @@ -85,17 +85,17 @@ func NewController(cfg ControllerConfig) *Controller { })) cfg.IngressWrap.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { 
controllerutil.Enqueue(obj, cfg.IngressWorkQueue) }, - UpdateFunc: func(oldObj, newObj interface{}) { + UpdateFunc: func(oldObj, newObj any) { controllerutil.Enqueue(newObj, cfg.IngressWorkQueue) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controllerutil.Enqueue(obj, cfg.IngressWorkQueue) }, }) - controller.enqueueRollout = func(obj interface{}) { + controller.enqueueRollout = func(obj any) { controllerutil.EnqueueRateLimited(obj, cfg.RolloutWorkQueue) } diff --git a/ingress/ingress_test.go b/ingress/ingress_test.go index 92ebcc09d4..be588dd51a 100644 --- a/ingress/ingress_test.go +++ b/ingress/ingress_test.go @@ -148,7 +148,7 @@ func underlyingControllerBuilder(t *testing.T, ing []*extensionsv1beta1.Ingress, }) enqueuedObjects := map[string]int{} var enqueuedObjectsLock sync.Mutex - c.enqueueRollout = func(obj interface{}) { + c.enqueueRollout = func(obj any) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { diff --git a/manifests/base/argo-rollouts-deployment.yaml b/manifests/base/argo-rollouts-deployment.yaml index 9718132785..7c8ebaf1ea 100644 --- a/manifests/base/argo-rollouts-deployment.yaml +++ b/manifests/base/argo-rollouts-deployment.yaml @@ -44,7 +44,28 @@ spec: failureThreshold: 3 successThreshold: 1 timeoutSeconds: 4 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + resources: + limits: + ephemeral-storage: 1Gi + volumeMounts: + - name: plugin-bin + mountPath: /home/argo-rollouts/plugin-bin + - name: tmp + mountPath: /tmp securityContext: runAsNonRoot: true + volumes: + - name: plugin-bin + emptyDir: {} + - name: tmp + emptyDir: {} strategy: - type: Recreate + type: RollingUpdate diff --git a/manifests/crds/analysis-run-crd.yaml b/manifests/crds/analysis-run-crd.yaml index 112b5e7287..04539115a5 100644 --- a/manifests/crds/analysis-run-crd.yaml +++ 
b/manifests/crds/analysis-run-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: analysisruns.argoproj.io spec: group: argoproj.io @@ -178,14 +178,35 @@ spec: type: object datadog: properties: + aggregator: + enum: + - avg + - min + - max + - sum + - last + - percentile + - mean + - l2norm + - area + type: string apiVersion: + default: v1 + enum: + - v1 + - v2 + type: string + formula: type: string interval: + default: 5m type: string + queries: + additionalProperties: + type: string + type: object query: type: string - required: - - query type: object graphite: properties: @@ -222,6 +243,9 @@ spec: backoffLimit: format: int32 type: integer + backoffLimitPerIndex: + format: int32 + type: integer completionMode: type: string completions: @@ -229,6 +253,9 @@ spec: type: integer manualSelector: type: boolean + maxFailedIndexes: + format: int32 + type: integer parallelism: format: int32 type: integer @@ -270,13 +297,14 @@ spec: x-kubernetes-list-type: atomic required: - action - - onPodConditions type: object type: array x-kubernetes-list-type: atomic required: - rules type: object + podReplacementPolicy: + type: string selector: properties: matchExpressions: @@ -448,6 +476,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -516,6 +554,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -582,6 +630,16 @@ spec: 
type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -650,6 +708,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -829,6 +897,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -879,6 +955,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1075,13 +1159,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -1434,6 +1545,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1484,6 +1603,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - 
seconds + type: object tcpSocket: properties: host: @@ -1680,13 +1807,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2046,6 +2200,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2096,6 +2258,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2292,13 +2462,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2524,12 +2721,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: 
string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -2799,6 +3027,19 @@ spec: type: string authentication: properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object sigv4: properties: profile: @@ -2847,6 +3088,31 @@ spec: type: object web: properties: + authentication: + properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object + sigv4: + properties: + profile: + type: string + region: + type: string + roleArn: + type: string + type: object + type: object body: type: string headers: @@ -2888,11 +3154,26 @@ spec: type: array terminate: type: boolean + ttlStrategy: + properties: + secondsAfterCompletion: + format: int32 + type: integer + secondsAfterFailure: + format: int32 + type: integer + secondsAfterSuccess: + format: int32 + type: integer + type: object required: - metrics type: object status: properties: + completedAt: + format: date-time + type: string dryRunSummary: properties: count: diff --git a/manifests/crds/analysis-template-crd.yaml b/manifests/crds/analysis-template-crd.yaml index 8f76efa195..ffb0da24c3 100644 --- a/manifests/crds/analysis-template-crd.yaml +++ b/manifests/crds/analysis-template-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + 
controller-gen.kubebuilder.io/version: v0.13.0 name: analysistemplates.argoproj.io spec: group: argoproj.io @@ -174,14 +174,35 @@ spec: type: object datadog: properties: + aggregator: + enum: + - avg + - min + - max + - sum + - last + - percentile + - mean + - l2norm + - area + type: string apiVersion: + default: v1 + enum: + - v1 + - v2 + type: string + formula: type: string interval: + default: 5m type: string + queries: + additionalProperties: + type: string + type: object query: type: string - required: - - query type: object graphite: properties: @@ -218,6 +239,9 @@ spec: backoffLimit: format: int32 type: integer + backoffLimitPerIndex: + format: int32 + type: integer completionMode: type: string completions: @@ -225,6 +249,9 @@ spec: type: integer manualSelector: type: boolean + maxFailedIndexes: + format: int32 + type: integer parallelism: format: int32 type: integer @@ -266,13 +293,14 @@ spec: x-kubernetes-list-type: atomic required: - action - - onPodConditions type: object type: array x-kubernetes-list-type: atomic required: - rules type: object + podReplacementPolicy: + type: string selector: properties: matchExpressions: @@ -444,6 +472,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -512,6 +550,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -578,6 +626,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -646,6 +704,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -825,6 +893,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -875,6 +951,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1071,13 +1155,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -1430,6 +1541,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1480,6 +1599,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1676,13 +1803,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string 
+ restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2042,6 +2196,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2092,6 +2254,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2288,13 +2458,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2520,12 +2717,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map 
restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -2795,6 +3023,19 @@ spec: type: string authentication: properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object sigv4: properties: profile: @@ -2843,6 +3084,31 @@ spec: type: object web: properties: + authentication: + properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object + sigv4: + properties: + profile: + type: string + region: + type: string + roleArn: + type: string + type: object + type: object body: type: string headers: @@ -2882,8 +3148,15 @@ spec: - provider type: object type: array - required: - - metrics + templates: + items: + properties: + clusterScope: + type: boolean + templateName: + type: string + type: object + type: array type: object required: - spec diff --git a/manifests/crds/cluster-analysis-template-crd.yaml b/manifests/crds/cluster-analysis-template-crd.yaml index 4e5187d8df..444d46fcaa 100644 --- a/manifests/crds/cluster-analysis-template-crd.yaml +++ b/manifests/crds/cluster-analysis-template-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: clusteranalysistemplates.argoproj.io spec: group: argoproj.io @@ -174,14 +174,35 @@ spec: type: object datadog: properties: + aggregator: + enum: + - avg + - min + - max + - sum + - last + - percentile + - mean + - l2norm + - area + type: string apiVersion: + default: v1 + enum: 
+ - v1 + - v2 + type: string + formula: type: string interval: + default: 5m type: string + queries: + additionalProperties: + type: string + type: object query: type: string - required: - - query type: object graphite: properties: @@ -218,6 +239,9 @@ spec: backoffLimit: format: int32 type: integer + backoffLimitPerIndex: + format: int32 + type: integer completionMode: type: string completions: @@ -225,6 +249,9 @@ spec: type: integer manualSelector: type: boolean + maxFailedIndexes: + format: int32 + type: integer parallelism: format: int32 type: integer @@ -266,13 +293,14 @@ spec: x-kubernetes-list-type: atomic required: - action - - onPodConditions type: object type: array x-kubernetes-list-type: atomic required: - rules type: object + podReplacementPolicy: + type: string selector: properties: matchExpressions: @@ -444,6 +472,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -512,6 +550,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -578,6 +626,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -646,6 +704,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -825,6 +893,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -875,6 +951,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1071,13 +1155,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -1430,6 +1541,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1480,6 +1599,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1676,13 +1803,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + 
x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2042,6 +2196,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2092,6 +2254,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2288,13 +2458,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2520,12 +2717,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -2795,6 
+3023,19 @@ spec: type: string authentication: properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object sigv4: properties: profile: @@ -2843,6 +3084,31 @@ spec: type: object web: properties: + authentication: + properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object + sigv4: + properties: + profile: + type: string + region: + type: string + roleArn: + type: string + type: object + type: object body: type: string headers: @@ -2882,8 +3148,15 @@ spec: - provider type: object type: array - required: - - metrics + templates: + items: + properties: + clusterScope: + type: boolean + templateName: + type: string + type: object + type: array type: object required: - spec diff --git a/manifests/crds/experiment-crd.yaml b/manifests/crds/experiment-crd.yaml index 3693004db0..e4287b51e0 100644 --- a/manifests/crds/experiment-crd.yaml +++ b/manifests/crds/experiment-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: experiments.argoproj.io spec: group: argoproj.io @@ -84,6 +84,17 @@ spec: - templateName type: object type: array + analysisRunMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object dryRun: items: properties: @@ -299,6 +310,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -367,6 +388,16 @@ 
spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -433,6 +464,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -501,6 +542,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -680,6 +731,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -730,6 +789,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -926,13 +993,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -1285,6 
+1379,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1335,6 +1437,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1531,13 +1641,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -1897,6 +2034,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1947,6 +2092,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2143,13 +2296,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: 
x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2375,12 +2555,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: diff --git a/manifests/crds/rollout-crd.yaml b/manifests/crds/rollout-crd.yaml index e3f73b1d3e..cd244b8b98 100755 --- a/manifests/crds/rollout-crd.yaml +++ b/manifests/crds/rollout-crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: rollouts.argoproj.io spec: group: argoproj.io @@ -571,6 +571,26 @@ spec: - templateName type: object type: array + analysisRunMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array duration: type: string templates: @@ -903,6 +923,9 @@ spec: - name type: object type: array + maxTrafficWeight: + format: int32 + type: integer nginx: properties: additionalIngressAnnotations: @@ -1083,6 +1106,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -1151,6 +1184,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -1217,6 +1260,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -1285,6 +1338,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -1464,6 +1527,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1514,6 +1585,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1710,13 +1789,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: 
x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2069,6 +2175,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2119,6 +2233,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2315,13 +2437,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2681,6 +2830,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2731,6 +2888,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2927,13 +3092,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + 
type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -3159,12 +3351,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -3330,6 +3553,8 @@ spec: type: string name: type: string + scaleDown: + type: string type: object type: object status: diff --git a/manifests/install.yaml b/manifests/install.yaml index f4469973f8..fb7e2d22df 100755 --- a/manifests/install.yaml +++ b/manifests/install.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: analysisruns.argoproj.io spec: group: argoproj.io @@ -179,14 +179,35 @@ spec: type: object datadog: properties: + aggregator: + enum: + - avg + - min + - max + - sum + - last + - percentile + - mean + - l2norm + - area + type: string apiVersion: + default: v1 + enum: + - v1 + - v2 + type: string + formula: type: string interval: + default: 5m type: string + queries: + additionalProperties: + type: string + type: object query: type: string - required: - - query type: object graphite: 
properties: @@ -223,6 +244,9 @@ spec: backoffLimit: format: int32 type: integer + backoffLimitPerIndex: + format: int32 + type: integer completionMode: type: string completions: @@ -230,6 +254,9 @@ spec: type: integer manualSelector: type: boolean + maxFailedIndexes: + format: int32 + type: integer parallelism: format: int32 type: integer @@ -271,13 +298,14 @@ spec: x-kubernetes-list-type: atomic required: - action - - onPodConditions type: object type: array x-kubernetes-list-type: atomic required: - rules type: object + podReplacementPolicy: + type: string selector: properties: matchExpressions: @@ -449,6 +477,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -517,6 +555,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -583,6 +631,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -651,6 +709,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -830,6 +898,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - 
seconds + type: object tcpSocket: properties: host: @@ -880,6 +956,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1076,13 +1160,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -1435,6 +1546,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1485,6 +1604,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -1681,13 +1808,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ 
-2047,6 +2201,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2097,6 +2259,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -2293,13 +2463,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -2525,12 +2722,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -2800,6 +3028,19 @@ spec: type: string authentication: properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object sigv4: 
properties: profile: @@ -2848,6 +3089,31 @@ spec: type: object web: properties: + authentication: + properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object + sigv4: + properties: + profile: + type: string + region: + type: string + roleArn: + type: string + type: object + type: object body: type: string headers: @@ -2889,11 +3155,26 @@ spec: type: array terminate: type: boolean + ttlStrategy: + properties: + secondsAfterCompletion: + format: int32 + type: integer + secondsAfterFailure: + format: int32 + type: integer + secondsAfterSuccess: + format: int32 + type: integer + type: object required: - metrics type: object status: properties: + completedAt: + format: date-time + type: string dryRunSummary: properties: count: @@ -3015,7 +3296,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: analysistemplates.argoproj.io spec: group: argoproj.io @@ -3187,14 +3468,35 @@ spec: type: object datadog: properties: + aggregator: + enum: + - avg + - min + - max + - sum + - last + - percentile + - mean + - l2norm + - area + type: string apiVersion: + default: v1 + enum: + - v1 + - v2 + type: string + formula: type: string interval: + default: 5m type: string + queries: + additionalProperties: + type: string + type: object query: type: string - required: - - query type: object graphite: properties: @@ -3231,6 +3533,9 @@ spec: backoffLimit: format: int32 type: integer + backoffLimitPerIndex: + format: int32 + type: integer completionMode: type: string completions: @@ -3238,6 +3543,9 @@ spec: type: integer manualSelector: type: boolean + maxFailedIndexes: + format: int32 + type: integer parallelism: format: int32 type: integer @@ -3279,13 +3587,14 @@ spec: x-kubernetes-list-type: atomic required: - action - - 
onPodConditions type: object type: array x-kubernetes-list-type: atomic required: - rules type: object + podReplacementPolicy: + type: string selector: properties: matchExpressions: @@ -3457,6 +3766,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -3525,6 +3844,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -3591,6 +3920,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -3659,6 +3998,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -3838,6 +4187,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -3888,6 +4245,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -4084,13 +4449,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: 
string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -4443,6 +4835,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -4493,6 +4893,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -4689,13 +5097,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -5055,6 +5490,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -5105,6 +5548,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -5301,13 +5752,40 @@ 
spec: format: int32 type: integer type: object - resources: - properties: - limits: - x-kubernetes-preserve-unknown-fields: true + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -5533,12 +6011,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -5808,6 +6317,19 @@ spec: type: string authentication: properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object sigv4: properties: profile: @@ -5856,6 +6378,31 @@ spec: type: object web: properties: + authentication: + properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object + sigv4: + properties: + profile: 
+ type: string + region: + type: string + roleArn: + type: string + type: object + type: object body: type: string headers: @@ -5895,8 +6442,15 @@ spec: - provider type: object type: array - required: - - metrics + templates: + items: + properties: + clusterScope: + type: boolean + templateName: + type: string + type: object + type: array type: object required: - spec @@ -5909,7 +6463,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: clusteranalysistemplates.argoproj.io spec: group: argoproj.io @@ -6081,14 +6635,35 @@ spec: type: object datadog: properties: + aggregator: + enum: + - avg + - min + - max + - sum + - last + - percentile + - mean + - l2norm + - area + type: string apiVersion: + default: v1 + enum: + - v1 + - v2 + type: string + formula: type: string interval: + default: 5m type: string + queries: + additionalProperties: + type: string + type: object query: type: string - required: - - query type: object graphite: properties: @@ -6125,6 +6700,9 @@ spec: backoffLimit: format: int32 type: integer + backoffLimitPerIndex: + format: int32 + type: integer completionMode: type: string completions: @@ -6132,6 +6710,9 @@ spec: type: integer manualSelector: type: boolean + maxFailedIndexes: + format: int32 + type: integer parallelism: format: int32 type: integer @@ -6173,13 +6754,14 @@ spec: x-kubernetes-list-type: atomic required: - action - - onPodConditions type: object type: array x-kubernetes-list-type: atomic required: - rules type: object + podReplacementPolicy: + type: string selector: properties: matchExpressions: @@ -6351,6 +6933,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: 
matchExpressions: @@ -6419,6 +7011,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -6485,6 +7087,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -6553,6 +7165,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -6732,6 +7354,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -6782,6 +7412,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -6978,13 +7616,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: 
properties: allowPrivilegeEscalation: @@ -7337,6 +8002,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -7387,6 +8060,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -7583,13 +8264,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -7949,6 +8657,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -7999,6 +8715,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -8195,13 +8919,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: 
x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -8427,12 +9178,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -8702,6 +9484,19 @@ spec: type: string authentication: properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object sigv4: properties: profile: @@ -8750,6 +9545,31 @@ spec: type: object web: properties: + authentication: + properties: + oauth2: + properties: + clientId: + type: string + clientSecret: + type: string + scopes: + items: + type: string + type: array + tokenUrl: + type: string + type: object + sigv4: + properties: + profile: + type: string + region: + type: string + roleArn: + type: string + type: object + type: object body: type: string headers: @@ -8789,8 +9609,15 @@ spec: - provider type: object type: array - required: - - metrics + templates: + items: + properties: + clusterScope: + type: boolean + templateName: + type: string + type: object + type: array type: object required: - spec @@ -8803,7 +9630,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + 
controller-gen.kubebuilder.io/version: v0.13.0 name: experiments.argoproj.io spec: group: argoproj.io @@ -8885,6 +9712,17 @@ spec: - templateName type: object type: array + analysisRunMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object dryRun: items: properties: @@ -9100,6 +9938,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -9168,6 +10016,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -9234,6 +10092,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -9302,6 +10170,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -9481,6 +10359,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -9531,6 +10417,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: 
object tcpSocket: properties: host: @@ -9727,13 +10621,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -10086,6 +11007,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -10136,6 +11065,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -10332,13 +11269,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -10698,6 +11662,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ 
-10748,6 +11720,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -10944,13 +11924,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -11176,12 +12183,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -11451,7 +12489,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.1 + controller-gen.kubebuilder.io/version: v0.13.0 name: rollouts.argoproj.io spec: group: argoproj.io @@ -12020,6 +13058,26 @@ spec: - templateName type: object type: array + analysisRunMetadata: + properties: + annotations: + additionalProperties: + type: string + type: 
object + labels: + additionalProperties: + type: string + type: object + type: object + dryRun: + items: + properties: + metricName: + type: string + required: + - metricName + type: object + type: array duration: type: string templates: @@ -12352,6 +13410,9 @@ spec: - name type: object type: array + maxTrafficWeight: + format: int32 + type: integer nginx: properties: additionalIngressAnnotations: @@ -12532,6 +13593,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -12600,6 +13671,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -12666,6 +13747,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -12734,6 +13825,16 @@ spec: type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: properties: matchExpressions: @@ -12913,6 +14014,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -12963,6 +14072,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + 
required: + - seconds + type: object tcpSocket: properties: host: @@ -13159,13 +14276,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -13518,6 +14662,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -13568,6 +14720,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -13764,13 +14924,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -14130,6 +15317,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object 
tcpSocket: properties: host: @@ -14180,6 +15375,14 @@ spec: required: - port type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object tcpSocket: properties: host: @@ -14376,13 +15579,40 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: + claims: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: x-kubernetes-preserve-unknown-fields: true requests: x-kubernetes-preserve-unknown-fields: true type: object + restartPolicy: + type: string securityContext: properties: allowPrivilegeEscalation: @@ -14608,12 +15838,43 @@ spec: - conditionType type: object type: array + resourceClaims: + items: + properties: + name: + type: string + source: + properties: + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map restartPolicy: type: string runtimeClassName: type: string schedulerName: type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map securityContext: properties: fsGroup: @@ -14779,6 +16040,8 @@ spec: type: string name: type: string + scaleDown: + type: string type: object type: object status: @@ -15296,6 +16559,7 @@ rules: - patch - apiGroups: - traefik.containo.us + - traefik.io resources: - traefikservices verbs: @@ -15457,7 +16721,7 @@ spec: matchLabels: app.kubernetes.io/name: argo-rollouts strategy: - type: Recreate + type: RollingUpdate template: metadata: 
labels: @@ -15490,6 +16754,27 @@ spec: periodSeconds: 5 successThreshold: 1 timeoutSeconds: 4 + resources: + limits: + ephemeral-storage: 1Gi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /home/argo-rollouts/plugin-bin + name: plugin-bin + - mountPath: /tmp + name: tmp securityContext: runAsNonRoot: true serviceAccountName: argo-rollouts + volumes: + - emptyDir: {} + name: plugin-bin + - emptyDir: {} + name: tmp diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml index b8c7638b4d..c52eff182b 100644 --- a/manifests/namespace-install.yaml +++ b/manifests/namespace-install.yaml @@ -217,6 +217,7 @@ rules: - patch - apiGroups: - traefik.containo.us + - traefik.io resources: - traefikservices verbs: @@ -377,7 +378,7 @@ spec: matchLabels: app.kubernetes.io/name: argo-rollouts strategy: - type: Recreate + type: RollingUpdate template: metadata: labels: @@ -412,6 +413,27 @@ spec: periodSeconds: 5 successThreshold: 1 timeoutSeconds: 4 + resources: + limits: + ephemeral-storage: 1Gi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /home/argo-rollouts/plugin-bin + name: plugin-bin + - mountPath: /tmp + name: tmp securityContext: runAsNonRoot: true serviceAccountName: argo-rollouts + volumes: + - emptyDir: {} + name: plugin-bin + - emptyDir: {} + name: tmp diff --git a/manifests/namespace-install/kustomization.yaml b/manifests/namespace-install/kustomization.yaml index 5d902d1c4c..153432ec04 100644 --- a/manifests/namespace-install/kustomization.yaml +++ b/manifests/namespace-install/kustomization.yaml @@ -8,8 +8,8 @@ bases: resources: - argo-rollouts-rolebinding.yaml -patchesStrategicMerge: -- add-namespaced-flag.yaml +patches: +- path: add-namespaced-flag.yaml 
patchesJson6902: - path: clusterrole-to-role.yaml diff --git a/manifests/notifications/kustomization.yaml b/manifests/notifications/kustomization.yaml index e8b7beeed9..751122f02c 100644 --- a/manifests/notifications/kustomization.yaml +++ b/manifests/notifications/kustomization.yaml @@ -4,13 +4,13 @@ kind: Kustomization resources: - argo-rollouts-notification-configmap.yaml -patchesStrategicMerge: - - on-rollout-completed.yaml - - on-scaling-replica-set.yaml - - on-rollout-step-completed.yaml - - on-rollout-updated.yaml - - on-rollout-aborted.yaml - - on-rollout-paused.yaml - - on-analysis-run-running.yaml - - on-analysis-run-error.yaml - - on-analysis-run-failed.yaml +patches: + - path: on-rollout-completed.yaml + - path: on-scaling-replica-set.yaml + - path: on-rollout-step-completed.yaml + - path: on-rollout-updated.yaml + - path: on-rollout-aborted.yaml + - path: on-rollout-paused.yaml + - path: on-analysis-run-running.yaml + - path: on-analysis-run-error.yaml + - path: on-analysis-run-failed.yaml diff --git a/manifests/role/argo-rollouts-clusterrole.yaml b/manifests/role/argo-rollouts-clusterrole.yaml index 95cc82004d..aab253e5ea 100644 --- a/manifests/role/argo-rollouts-clusterrole.yaml +++ b/manifests/role/argo-rollouts-clusterrole.yaml @@ -224,6 +224,7 @@ rules: - patch - apiGroups: - traefik.containo.us + - traefik.io resources: - traefikservices verbs: diff --git a/metricproviders/datadog/datadog.go b/metricproviders/datadog/datadog.go index 00f9c8a8ac..48b60fc0f3 100644 --- a/metricproviders/datadog/datadog.go +++ b/metricproviders/datadog/datadog.go @@ -26,6 +26,7 @@ import ( "k8s.io/client-go/kubernetes" ) +// This is done so we can explicitly override it in the unit test var unixNow = func() int64 { return timeutil.Now().Unix() } const ( @@ -35,7 +36,6 @@ const ( DatadogApiKey = "api-key" DatadogAppKey = "app-key" DatadogAddress = "address" - DefaultApiVersion = "v1" ) // Provider contains all the required components to run a Datadog query @@ -46,9 
+46,10 @@ type Provider struct { } type datadogQueryAttributes struct { - From int64 `json:"from"` - To int64 `json:"to"` - Queries []map[string]string `json:"queries"` + From int64 `json:"from"` + To int64 `json:"to"` + Queries []map[string]string `json:"queries"` + Formulas []map[string]string `json:"formulas"` } type datadogQuery struct { @@ -69,8 +70,9 @@ type datadogResponseV1 struct { type datadogResponseV2 struct { Data struct { Attributes struct { - Values [][]float64 - Times []int64 + Columns []struct { + Values []float64 + } } Errors string } @@ -82,7 +84,7 @@ type datadogConfig struct { AppKey string `yaml:"app-key,omitempty"` } -// Type incidates provider is a Datadog provider +// Type indicates provider is a Datadog provider func (p *Provider) Type() string { return ProviderType } @@ -92,57 +94,58 @@ func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { return nil } -func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { - startTime := timeutil.MetaNow() - - // Measurement to pass back - measurement := v1alpha1.Measurement{ - StartedAt: &startTime, - } - +func (p *Provider) buildEndpointUrl(apiVersion string) (*url.URL, error) { endpoint := "https://api.datadoghq.com" if p.config.Address != "" { endpoint = p.config.Address } - // Check if the URL is valid first before adding the endpoint + // Check if the user provided URL is valid first before adding the endpoint url, err := url.Parse(endpoint) if err != nil { - return metricutil.MarkMeasurementError(measurement, err) + return nil, err } - apiVersion := DefaultApiVersion - if metric.Provider.Datadog.ApiVersion != "" { - apiVersion = metric.Provider.Datadog.ApiVersion + route := "/api/v1/query" + if apiVersion == "v2" { + route = "/api/v2/query/scalar" } - if apiVersion == "v1" { + // Add endpoint after getting the API version + url, err = url.Parse(endpoint + route) + if err != nil { + return nil, err + } + return url, err +} + +func (p 
*Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { + startTime := timeutil.MetaNow() + dd := metric.Provider.Datadog + + if dd.ApiVersion == "v1" { p.logCtx.Warn("Datadog will soon deprecate their API v1. Please consider switching to v2 soon.") } - route := "/api/v1/query" - if apiVersion == "v2" { - route = "/api/v2/query/timeseries" + // Measurement to pass back + measurement := v1alpha1.Measurement{ + StartedAt: &startTime, } - // Add endpoint after getting the API version - url, err = url.Parse(endpoint + route) + url, err := p.buildEndpointUrl(dd.ApiVersion) if err != nil { return metricutil.MarkMeasurementError(measurement, err) } - now := unixNow() - var interval int64 = 300 - if metric.Provider.Datadog.Interval != "" { - expDuration, err := metric.Provider.Datadog.Interval.Duration() - if err != nil { - return metricutil.MarkMeasurementError(measurement, err) - } - // Convert to seconds as DataDog expects unix timestamp - interval = int64(expDuration.Seconds()) + // Interval default is in the spec. 
bigger things would need to fail to get here without an dd.Interval + expDuration, err := dd.Interval.Duration() + if err != nil { + return metricutil.MarkMeasurementError(measurement, err) } + // Convert to seconds as DataDog expects unix timestamp + interval := int64(expDuration.Seconds()) - request, err := p.createRequest(metric.Provider.Datadog.Query, apiVersion, now, interval, url) + request, err := p.createRequest(dd, unixNow(), interval, url) if err != nil { return metricutil.MarkMeasurementError(measurement, err) } @@ -158,12 +161,11 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph Timeout: time.Duration(10) * time.Second, } response, err := httpClient.Do(request) - if err != nil { return metricutil.MarkMeasurementError(measurement, err) } - value, status, err := p.parseResponse(metric, response, apiVersion) + value, status, err := p.parseResponse(metric, response, dd.ApiVersion) if err != nil { return metricutil.MarkMeasurementError(measurement, err) } @@ -176,52 +178,84 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph return measurement } -func (p *Provider) createRequest(query string, apiVersion string, now int64, interval int64, url *url.URL) (*http.Request, error) { - if apiVersion == "v1" { - q := url.Query() - q.Set("query", query) - q.Set("from", strconv.FormatInt(now-interval, 10)) - q.Set("to", strconv.FormatInt(now, 10)) - url.RawQuery = q.Encode() - - return &http.Request{Method: "GET"}, nil - } else if apiVersion == "v2" { - queryBody, err := json.Marshal(datadogRequest{ - Data: datadogQuery{ - QueryType: "timeseries_request", - Attributes: datadogQueryAttributes{ - From: (now - interval) * 1000, - To: now * 1000, - Queries: []map[string]string{{ - "data_source": "metrics", - "query": query, - }}, - }, - }}) - if err != nil { - return nil, fmt.Errorf("Could not parse your JSON request: %v", err) +func (p *Provider) createRequest(dd *v1alpha1.DatadogMetric, now int64, interval 
int64, url *url.URL) (*http.Request, error) { + if dd.ApiVersion == "v1" { + return p.createRequestV1(dd.Query, now, interval, url) + } + + // we know dd.Query and dd.Queries are mutually exclusive. + if dd.Query != "" { + dd.Queries = map[string]string{"query": dd.Query} + } + + return p.createRequestV2(dd.Queries, dd.Formula, now, interval, dd.Aggregator, url) +} + +func (p *Provider) createRequestV1(query string, now int64, interval int64, url *url.URL) (*http.Request, error) { + q := url.Query() + q.Set("query", query) + q.Set("from", strconv.FormatInt(now-interval, 10)) + q.Set("to", strconv.FormatInt(now, 10)) + url.RawQuery = q.Encode() + + return &http.Request{Method: "GET"}, nil +} + +func buildQueriesPayload(queries map[string]string, aggregator string) []map[string]string { + qp := make([]map[string]string, 0, len(queries)) + for k, v := range queries { + p := map[string]string{ + "aggregator": aggregator, + "data_source": "metrics", + "name": k, + "query": v, } - request := &http.Request{Method: "POST"} - request.Body = io.NopCloser(bytes.NewReader(queryBody)) - return request, nil + qp = append(qp, p) + } + return qp +} + +func (p *Provider) createRequestV2(queries map[string]string, formula string, now int64, interval int64, aggregator string, url *url.URL) (*http.Request, error) { + formulas := []map[string]string{} + // ddAPI supports multiple formulas but doesn't make sense in our context + // can't have a 'blank' formula, so have to guard + if formula != "" { + formulas = []map[string]string{{ + "formula": formula, + }} } - return nil, fmt.Errorf("Invalid API version: %s", apiVersion) + attribs := datadogQueryAttributes{ + // Datadog requires milliseconds for v2 api + From: (now - interval) * 1000, + To: now * 1000, + Queries: buildQueriesPayload(queries, aggregator), + Formulas: formulas, + } + + queryBody, err := json.Marshal(datadogRequest{ + Data: datadogQuery{ + QueryType: "scalar_request", + Attributes: attribs, + }, + }) + if err != nil { + 
return nil, fmt.Errorf("Could not parse your JSON request: %v", err) + } + request := &http.Request{Method: "POST"} + request.Body = io.NopCloser(bytes.NewReader(queryBody)) + return request, nil } func (p *Provider) parseResponse(metric v1alpha1.Metric, response *http.Response, apiVersion string) (string, v1alpha1.AnalysisPhase, error) { if apiVersion == "v1" { return p.parseResponseV1(metric, response) - } else if apiVersion == "v2" { - return p.parseResponseV2(metric, response) } - return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("Invalid API version: %s", apiVersion) + return p.parseResponseV2(metric, response) } func (p *Provider) parseResponseV1(metric v1alpha1.Metric, response *http.Response) (string, v1alpha1.AnalysisPhase, error) { - bodyBytes, err := io.ReadAll(response.Body) - if err != nil { return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("Received no bytes in response: %v", err) } @@ -263,9 +297,7 @@ func (p *Provider) parseResponseV1(metric v1alpha1.Metric, response *http.Respon } func (p *Provider) parseResponseV2(metric v1alpha1.Metric, response *http.Response) (string, v1alpha1.AnalysisPhase, error) { - bodyBytes, err := io.ReadAll(response.Body) - if err != nil { return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("Received no bytes in response: %v", err) } @@ -288,26 +320,30 @@ func (p *Provider) parseResponseV2(metric v1alpha1.Metric, response *http.Respon } // Handle an empty query result - if reflect.ValueOf(res.Data.Attributes).IsZero() || len(res.Data.Attributes.Values) == 0 || len(res.Data.Attributes.Times) == 0 { + if reflect.ValueOf(res.Data.Attributes).IsZero() || len(res.Data.Attributes.Columns) == 0 || len(res.Data.Attributes.Columns[0].Values) == 0 { var nilFloat64 *float64 status, err := evaluate.EvaluateResult(nilFloat64, metric, p.logCtx) - attributesBytes, jsonErr := json.Marshal(res.Data.Attributes) + + var attributesBytes []byte + var jsonErr error + // Should be impossible for this to not be true, based on dd openapi spec. 
+ // But in this case, better safe than sorry + if len(res.Data.Attributes.Columns) == 1 { + attributesBytes, jsonErr = json.Marshal(res.Data.Attributes.Columns[0].Values) + } else { + attributesBytes, jsonErr = json.Marshal(res.Data.Attributes) + } + if jsonErr != nil { - return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("Failed to marshall JSON empty series: %v", jsonErr) + return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("Failed to marshall JSON empty Values: %v", jsonErr) } return string(attributesBytes), status, err } // Handle a populated query result - attributes := res.Data.Attributes - datapoint := attributes.Values[0] - timepoint := attributes.Times[len(attributes.Times)-1] - if timepoint == 0 { - return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("Datapoint does not have a corresponding time value") - } - - value := datapoint[len(datapoint)-1] + column := res.Data.Attributes.Columns[0] + value := column.Values[0] status, err := evaluate.EvaluateResult(value, metric, p.logCtx) return strconv.FormatFloat(value, 'f', -1, 64), status, err } @@ -341,7 +377,48 @@ func lookupKeysInEnv(keys []string) map[string]string { return valuesByKey } -func NewDatadogProvider(logCtx log.Entry, kubeclientset kubernetes.Interface) (*Provider, error) { +// The current gen tooling we are using can't generate CRD with all the validations we need. +// This is unfortunate, user has more ways to deliver an invalid Analysis Template vs +// being rejected on delivery by k8s (and allowing for a validation step if desired in CI/CD). +// So we run through all the checks here. If the situation changes (eg: being able to use oneOf with required) +// in the CRD spec, please update. +func validateIncomingProps(dd *v1alpha1.DatadogMetric) error { + // check that we have the required field + if dd.Query == "" && len(dd.Queries) == 0 { + return errors.New("Must have either a query or queries. 
Please review the Analysis Template.") + } + + // check that we have ONE OF query/queries + if dd.Query != "" && len(dd.Queries) > 0 { + return errors.New("Cannot have both a query and queries. Please review the Analysis Template.") + } + + // check that query is set for apiversion v1 + if dd.ApiVersion == "v1" && dd.Query == "" { + return errors.New("Query is empty. API Version v1 only supports using the query parameter in your Analysis Template.") + } + + // formula <3 queries. won't go anywhere without them + if dd.Formula != "" && len(dd.Queries) == 0 { + return errors.New("Formula are only valid when queries are set. Please review the Analysis Template.") + } + + // Reject queries with more than 1 when NO formula provided. While this would technically work + // DD will return 2 columns of data, and there is no guarantee what order they would be in, so + // there is no way to guess at intention of user. Since this is about metrics and monitoring, we should + // avoid ambiguity. + if dd.Formula == "" && len(dd.Queries) > 1 { + return errors.New("When multiple queries are provided you must include a formula.") + } + + if dd.ApiVersion == "v1" && dd.Aggregator != "" { + return errors.New("Aggregator is not supported in v1. 
Please review the Analysis Template.") + } + + return nil +} + +func NewDatadogProvider(logCtx log.Entry, kubeclientset kubernetes.Interface, metric v1alpha1.Metric) (*Provider, error) { ns := defaults.Namespace() apiKey := "" @@ -366,6 +443,12 @@ func NewDatadogProvider(logCtx log.Entry, kubeclientset kubernetes.Interface) (* } if apiKey != "" && appKey != "" { + + err := validateIncomingProps(metric.Provider.Datadog) + if err != nil { + return nil, err + } + return &Provider{ logCtx: logCtx, config: datadogConfig{ @@ -377,5 +460,4 @@ func NewDatadogProvider(logCtx log.Entry, kubeclientset kubernetes.Interface) (* } else { return nil, errors.New("API or App token not found") } - } diff --git a/metricproviders/datadog/datadogV1_test.go b/metricproviders/datadog/datadogV1_test.go index 5f56ae3c0d..af58d47617 100644 --- a/metricproviders/datadog/datadogV1_test.go +++ b/metricproviders/datadog/datadogV1_test.go @@ -19,7 +19,6 @@ import ( ) func TestRunSuite(t *testing.T) { - const expectedApiKey = "0123456789abcdef0123456789abcdef" const expectedAppKey = "0123456789abcdef0123456789abcdef01234567" @@ -39,7 +38,7 @@ func TestRunSuite(t *testing.T) { } // Test Cases - var tests = []struct { + tests := []struct { serverURL string webServerStatus int webServerResponse string @@ -144,7 +143,7 @@ func TestRunSuite(t *testing.T) { // Expect error with no default() and no data { webServerStatus: 200, - webServerResponse: `{"status":"ok","series":[{"pointlist":[]}]}`, + webServerResponse: `{"status":"ok","series":[]}`, metric: v1alpha1.Metric{ Name: "foo", SuccessCondition: "result < 0.05", @@ -159,14 +158,14 @@ func TestRunSuite(t *testing.T) { // Expect success with default() and no data { webServerStatus: 200, - webServerResponse: `{"status":"ok","series":[{"pointlist":[]}]}`, + webServerResponse: `{"status":"ok","series":[]}`, metric: v1alpha1.Metric{ Name: "foo", SuccessCondition: "default(result, 0) < 0.05", Provider: ddProviderIntervalDefault, }, expectedIntervalSeconds: 
300, - expectedValue: `[{"pointlist":[]}]`, + expectedValue: `[]`, expectedPhase: v1alpha1.AnalysisPhaseSuccessful, useEnvVarForKeys: false, }, @@ -174,14 +173,14 @@ func TestRunSuite(t *testing.T) { // Expect failure with bad default() and no data { webServerStatus: 200, - webServerResponse: `{"status":"ok","series":[{"pointlist":[]}]}`, + webServerResponse: `{"status":"ok","series":[]}`, metric: v1alpha1.Metric{ Name: "foo", SuccessCondition: "default(result, 1) < 0.05", Provider: ddProviderIntervalDefault, }, expectedIntervalSeconds: 300, - expectedValue: `[{"pointlist":[]}]`, + expectedValue: `[]`, expectedPhase: v1alpha1.AnalysisPhaseFailed, useEnvVarForKeys: false, }, @@ -219,8 +218,10 @@ func TestRunSuite(t *testing.T) { // Error if server address is faulty { - serverURL: "://wrong.schema", - metric: v1alpha1.Metric{}, + serverURL: "://wrong.schema", + metric: v1alpha1.Metric{ + Provider: ddProviderInterval10m, + }, expectedPhase: v1alpha1.AnalysisPhaseError, expectedErrorMessage: "parse \"://wrong.schema\": missing protocol scheme", useEnvVarForKeys: false, @@ -235,11 +236,7 @@ func TestRunSuite(t *testing.T) { if serverURL == "" { // Server setup with response server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - if test.metric.Provider.Datadog.ApiVersion == "" && DefaultApiVersion != "v1" { - t.Errorf("\nApiVersion was left blank in the tests, but the default API version is not v1 anymore.") - } - - //Check query variables + // Check query variables actualQuery := req.URL.Query().Get("query") actualFrom := req.URL.Query().Get("from") actualTo := req.URL.Query().Get("to") @@ -260,7 +257,7 @@ func TestRunSuite(t *testing.T) { t.Errorf("\nfailed to parse to: %v", err) } - //Check headers + // Check headers if req.Header.Get("Content-Type") != "application/json" { t.Errorf("\nContent-Type header expected to be application/json but got %s", req.Header.Get("Content-Type")) } @@ -315,7 +312,16 @@ func TestRunSuite(t 
*testing.T) { return true, tokenSecret, nil }) - provider, _ := NewDatadogProvider(*logCtx, fakeClient) + // Enforce these having defaults + if test.metric.Provider.Datadog.ApiVersion == "" { + test.metric.Provider.Datadog.ApiVersion = "v1" + } + + if test.metric.Provider.Datadog.Interval == "" { + test.metric.Provider.Datadog.Interval = "5m" + } + + provider, _ := NewDatadogProvider(*logCtx, fakeClient, test.metric) metricsMetadata := provider.GetMetadata(test.metric) assert.Nil(t, metricsMetadata) diff --git a/metricproviders/datadog/datadogV2_test.go b/metricproviders/datadog/datadogV2_test.go index 11a82411c7..f3e481160e 100644 --- a/metricproviders/datadog/datadogV2_test.go +++ b/metricproviders/datadog/datadogV2_test.go @@ -3,7 +3,6 @@ package datadog import ( "encoding/json" "io" - "io/ioutil" "net/http" "net/http/httptest" "os" @@ -19,30 +18,48 @@ import ( kubetesting "k8s.io/client-go/testing" ) -func TestRunSuiteV2(t *testing.T) { - - const expectedApiKey = "0123456789abcdef0123456789abcdef" - const expectedAppKey = "0123456789abcdef0123456789abcdef01234567" - - unixNow = func() int64 { return 1599076435 } - - ddProviderIntervalDefault := v1alpha1.MetricProvider{ +func newQueryDefaultProvider() v1alpha1.MetricProvider { + return v1alpha1.MetricProvider{ Datadog: &v1alpha1.DatadogMetric{ + Interval: "5m", Query: "avg:kubernetes.cpu.user.total{*}", ApiVersion: "v2", }, } +} - ddProviderInterval10m := v1alpha1.MetricProvider{ +func newQueriesDefaultProvider() v1alpha1.MetricProvider { + return v1alpha1.MetricProvider{ + Datadog: &v1alpha1.DatadogMetric{ + Interval: "5m", + Queries: map[string]string{ + "a": "avg:error_requests{*}", + "b": "avg:total_requests{*}", + }, + Formula: "a/b", + ApiVersion: "v2", + }, + } +} + +func newQueryProviderInterval10m() v1alpha1.MetricProvider { + return v1alpha1.MetricProvider{ Datadog: &v1alpha1.DatadogMetric{ Query: "avg:kubernetes.cpu.user.total{*}", Interval: "10m", ApiVersion: "v2", }, } +} + +func TestRunSuiteV2(t 
*testing.T) { + const expectedApiKey = "0123456789abcdef0123456789abcdef" + const expectedAppKey = "0123456789abcdef0123456789abcdef01234567" + + unixNow = func() int64 { return 1599076435 } // Test Cases - var tests = []struct { + tests := []struct { serverURL string webServerStatus int webServerResponse string @@ -53,75 +70,70 @@ func TestRunSuiteV2(t *testing.T) { expectedErrorMessage string useEnvVarForKeys bool }{ - // When last value of time series matches condition then succeed. { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [[0.0020008318672513122, 0.0003332881882246533]], "times": [1598867910000, 1598867925000]}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": [0.0006332881882246533]}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "simple scalar query", SuccessCondition: "result < 0.001", FailureCondition: "result >= 0.001", - Provider: ddProviderInterval10m, + Provider: newQueryProviderInterval10m(), }, expectedIntervalSeconds: 600, - expectedValue: "0.0003332881882246533", + expectedValue: "0.0006332881882246533", expectedPhase: v1alpha1.AnalysisPhaseSuccessful, useEnvVarForKeys: false, }, - // Same test as above, but derive DD keys from env var instead of k8s secret { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [[0.0020008318672513122, 0.0003332881882246533]], "times": [1598867910000, 1598867925000]}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": [0.0003332881882246533]}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "keys from env vars", SuccessCondition: "result < 0.001", FailureCondition: "result >= 0.001", - Provider: ddProviderInterval10m, + Provider: newQueryProviderInterval10m(), }, expectedIntervalSeconds: 600, expectedValue: "0.0003332881882246533", expectedPhase: v1alpha1.AnalysisPhaseSuccessful, useEnvVarForKeys: true, }, - // When last value of time series does not match condition then fail. 
{ webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [[0.0020008318672513122, 0.006121378742186943]], "times": [1598867910000, 1598867925000]}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": [0.006121374442186943]}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "value does not match condition then fail", SuccessCondition: "result < 0.001", FailureCondition: "result >= 0.001", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, - expectedValue: "0.006121378742186943", + expectedValue: "0.006121374442186943", expectedPhase: v1alpha1.AnalysisPhaseFailed, useEnvVarForKeys: false, }, - // Error if the request is invalid { webServerStatus: 400, webServerResponse: `{"status":"error","error":"error messsage"}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "error for invalid request", SuccessCondition: "result < 0.001", FailureCondition: "result >= 0.001", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, expectedPhase: v1alpha1.AnalysisPhaseError, expectedErrorMessage: "received non 2xx response code: 400 {\"status\":\"error\",\"error\":\"error messsage\"}", useEnvVarForKeys: false, }, - // Error if there is an authentication issue { webServerStatus: 401, webServerResponse: `{"errors": ["No authenticated user."]}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "no authenticated user", SuccessCondition: "result < 0.001", FailureCondition: "result >= 0.001", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, expectedPhase: v1alpha1.AnalysisPhaseError, @@ -129,14 +141,13 @@ func TestRunSuiteV2(t *testing.T) { useEnvVarForKeys: false, }, - // Expect success with default() and data { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [[0.0020008318672513122, 0.006121378742186943]], "times": [1598867910000, 
1598867925000]}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": [0.006121378742186943]}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "success with default and data", SuccessCondition: "default(result, 0) < 0.05", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, expectedValue: "0.006121378742186943", @@ -144,14 +155,13 @@ func TestRunSuiteV2(t *testing.T) { useEnvVarForKeys: false, }, - // Expect error with no default() and no data { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [], "times": []}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": []}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "error with no default and no data", SuccessCondition: "result < 0.05", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, expectedPhase: v1alpha1.AnalysisPhaseError, @@ -159,75 +169,89 @@ func TestRunSuiteV2(t *testing.T) { useEnvVarForKeys: false, }, - // Expect success with default() and no data { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [], "times": []}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": []}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "success with default and no data", SuccessCondition: "default(result, 0) < 0.05", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, - expectedValue: `{"Values":[],"Times":[]}`, + expectedValue: `[]`, expectedPhase: v1alpha1.AnalysisPhaseSuccessful, useEnvVarForKeys: false, }, - // Expect failure with bad default() and no data { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [], "times": []}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": []}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "fail when bad default 
and no data", SuccessCondition: "default(result, 1) < 0.05", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, - expectedValue: `{"Values":[],"Times":[]}`, + expectedValue: `[]`, expectedPhase: v1alpha1.AnalysisPhaseFailed, useEnvVarForKeys: false, }, - // Expect success with bad default() and good data { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": [[0.0020008318672513122, 0.006121378742186943]], "times": [1598867910000, 1598867925000]}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": [0.006721378742186999]}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "success bad default and good data", SuccessCondition: "default(result, 1) < 0.05", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, - expectedValue: `0.006121378742186943`, + expectedValue: `0.006721378742186999`, expectedPhase: v1alpha1.AnalysisPhaseSuccessful, useEnvVarForKeys: false, }, - // Error if datadog returns non-array values { webServerStatus: 200, - webServerResponse: `{"data": {"attributes": {"values": "invalid", "times": "invalid"}}}`, + webServerResponse: `{"data": {"attributes": {"columns": [{"values": "invalid"}]}}}`, metric: v1alpha1.Metric{ - Name: "foo", + Name: "error when bad values from dd", SuccessCondition: "result < 0.001", FailureCondition: "result >= 0.001", - Provider: ddProviderIntervalDefault, + Provider: newQueryDefaultProvider(), }, expectedIntervalSeconds: 300, expectedPhase: v1alpha1.AnalysisPhaseError, - expectedErrorMessage: "Could not parse JSON body: json: cannot unmarshal string into Go struct field .Data.Attributes.Values of type [][]float64", + expectedErrorMessage: "Could not parse JSON body: json: cannot unmarshal string into Go struct field .Data.Attributes.Columns.Values of type []float64", useEnvVarForKeys: false, }, // Error if server address is faulty { - serverURL: 
"://wrong.schema", - metric: v1alpha1.Metric{}, + serverURL: "://wrong.schema", + metric: v1alpha1.Metric{ + Provider: newQueryProviderInterval10m(), + }, expectedPhase: v1alpha1.AnalysisPhaseError, expectedErrorMessage: "parse \"://wrong.schema\": missing protocol scheme", useEnvVarForKeys: false, }, + + // Queries + Formula + // Expect success with default() and data + { + webServerStatus: 200, + webServerResponse: `{"data": {"attributes": {"columns": [ {"values": [0.0006444881882246533]}]}}}`, + metric: v1alpha1.Metric{ + Name: "expect success queries and formula", + SuccessCondition: "default(result, 0) < 0.05", + Provider: newQueriesDefaultProvider(), + }, + expectedIntervalSeconds: 300, + expectedValue: "0.0006444881882246533", + expectedPhase: v1alpha1.AnalysisPhaseSuccessful, + useEnvVarForKeys: false, + }, } // Run @@ -238,9 +262,8 @@ func TestRunSuiteV2(t *testing.T) { if serverURL == "" { // Server setup with response server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - - //Check query variables - bodyBytes, err := ioutil.ReadAll(req.Body) + // Check query variables + bodyBytes, err := io.ReadAll(req.Body) if err != nil { t.Errorf("\nreceived no bytes in request: %v", err) } @@ -251,12 +274,29 @@ func TestRunSuiteV2(t *testing.T) { t.Errorf("\nCould not parse JSON request body: %v", err) } + // Keep the simple check behaviour if there is no Queries passed in from the analysis run + usesQuery := len(test.metric.Provider.Datadog.Queries) == 0 + usesFormula := test.metric.Provider.Datadog.Formula != "" + + actualFormulas := reqBody.Data.Attributes.Formulas actualQuery := reqBody.Data.Attributes.Queries[0]["query"] + actualQueries := reqBody.Data.Attributes.Queries actualFrom := reqBody.Data.Attributes.From actualTo := reqBody.Data.Attributes.To - if actualQuery != "avg:kubernetes.cpu.user.total{*}" { - t.Errorf("\nquery expected avg:kubernetes.cpu.user.total{*} but got %s", actualQuery) + if usesQuery { + if 
actualQuery != "avg:kubernetes.cpu.user.total{*}" { + t.Errorf("\nquery expected avg:kubernetes.cpu.user.total{*} but got %s", actualQuery) + } + } else { + // Check queries has expected number of queries + if len(actualQueries) != len(test.metric.Provider.Datadog.Queries) { + t.Errorf("\nExpected %d queries but received %d", len(test.metric.Provider.Datadog.Queries), len(reqBody.Data.Attributes.Queries)) + } + + if usesFormula && len(actualFormulas) == 0 { + t.Errorf("\nExpected formula but no Formulas in request: %+v", actualFormulas) + } } if actualFrom != (unixNow()-test.expectedIntervalSeconds)*1000 { @@ -271,7 +311,7 @@ func TestRunSuiteV2(t *testing.T) { t.Errorf("\nfailed to parse to: %v", err) } - //Check headers + // Check headers if req.Header.Get("Content-Type") != "application/json" { t.Errorf("\nContent-Type header expected to be application/json but got %s", req.Header.Get("Content-Type")) } @@ -326,7 +366,7 @@ func TestRunSuiteV2(t *testing.T) { return true, tokenSecret, nil }) - provider, _ := NewDatadogProvider(*logCtx, fakeClient) + provider, _ := NewDatadogProvider(*logCtx, fakeClient, test.metric) metricsMetadata := provider.GetMetadata(test.metric) assert.Nil(t, metricsMetadata) diff --git a/metricproviders/datadog/datadog_test.go b/metricproviders/datadog/datadog_test.go new file mode 100644 index 0000000000..ed81977296 --- /dev/null +++ b/metricproviders/datadog/datadog_test.go @@ -0,0 +1,181 @@ +// These are tests that don't belong to v1 or v2 API + +package datadog + +import ( + "log" + "os" + "testing" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/stretchr/testify/assert" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/client-go/kubernetes/scheme" +) + +func TestDatadogSpecDefaults(t *testing.T) { + _ = apiextv1.AddToScheme(scheme.Scheme) + decode := scheme.Codecs.UniversalDeserializer().Decode + + // Load CRD yaml. Is this icky? 
It seems like the best way to guarantee + // what we expect, since setting the default is via annotations + // Only bothering with the analysis-template-crd. We know cluster-analysis-template-crd + // & analysis-run-crd are all generated from the same source object. + data, err := os.ReadFile("../../manifests/crds/analysis-template-crd.yaml") + if err != nil { + log.Fatalf("could not read CRD file: %v", err) + } + obj, gVK, err := decode(data, nil, nil) + if err != nil { + log.Fatalf("error parsing file: %v", err) + } + if gVK.Kind != "CustomResourceDefinition" { + log.Fatalf("object was not a CRD") + } + + o := obj.(*apiextv1.CustomResourceDefinition) + ddSpec := o.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["spec"].Properties["metrics"].Items.Schema.Properties["provider"].Properties["datadog"] + + t.Run("apiVersion: Validate default is v1", func(t *testing.T) { + defaultVersion := string(ddSpec.Properties["apiVersion"].Default.Raw) + assert.Equal(t, "\"v1\"", defaultVersion, "Default version should be v1") + }) + + t.Run("apiVersion: Validate enum exists to restrict apiVersion to 2 options", func(t *testing.T) { + versionEnums := ddSpec.Properties["apiVersion"].Enum + assert.Equal(t, 2, len(versionEnums), "Expecting 2 enum options") + assert.Equal(t, "\"v1\"", string(versionEnums[0].Raw), "\"v1\" expected, got %s", string(versionEnums[0].Raw)) + assert.Equal(t, "\"v2\"", string(versionEnums[1].Raw), "\"v2\" is missing, got %s", string(versionEnums[1].Raw)) + }) + + t.Run("interval: Validate default is 5m", func(t *testing.T) { + defaultInterval := string(ddSpec.Properties["interval"].Default.Raw) + assert.Equal(t, "\"5m\"", defaultInterval, "Default interval should be \"5m\" ") + }) + + t.Run("aggregator: Validate enum exists to restrict aggregator to 9 options", func(t *testing.T) { + aggregatorEnums := ddSpec.Properties["aggregator"].Enum + assert.Equal(t, 9, len(aggregatorEnums), "Expecting 9 enum options") + assert.Equal(t, "\"avg\"", 
string(aggregatorEnums[0].Raw), "\"avg\" expected, got %s", string(aggregatorEnums[0].Raw)) + assert.Equal(t, "\"min\"", string(aggregatorEnums[1].Raw), "\"min\" expected, got %s", string(aggregatorEnums[1].Raw)) + assert.Equal(t, "\"max\"", string(aggregatorEnums[2].Raw), "\"max\" expected, got %s", string(aggregatorEnums[2].Raw)) + assert.Equal(t, "\"sum\"", string(aggregatorEnums[3].Raw), "\"sum\" expected, got %s", string(aggregatorEnums[3].Raw)) + assert.Equal(t, "\"last\"", string(aggregatorEnums[4].Raw), "\"last\" expected, got %s", string(aggregatorEnums[4].Raw)) + assert.Equal(t, "\"percentile\"", string(aggregatorEnums[5].Raw), "\"percentile\" expected, got %s", string(aggregatorEnums[5].Raw)) + assert.Equal(t, "\"mean\"", string(aggregatorEnums[6].Raw), "\"mean\" expected, got %s", string(aggregatorEnums[6].Raw)) + assert.Equal(t, "\"l2norm\"", string(aggregatorEnums[7].Raw), "\"l2norm\" expected, got %s", string(aggregatorEnums[7].Raw)) + assert.Equal(t, "\"area\"", string(aggregatorEnums[8].Raw), "\"area\" expected, got %s", string(aggregatorEnums[8].Raw)) + }) +} + +func TestValidateIncomingProps(t *testing.T) { + tests := []struct { + name string + metric *v1alpha1.DatadogMetric + expectedErrorMessage string + }{ + { + name: "query and queries missing", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v1", + Query: "", + Queries: nil, + }, + expectedErrorMessage: "Must have either a query or queries", + }, + { + name: "both query and queries", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v1", + Query: "foo", + Queries: map[string]string{"a": "sum:api_gateway.request.count{*}.as_count()"}, + }, + expectedErrorMessage: "Cannot have both a query and queries", + }, + { + name: "queries with v1 api", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v1", + Queries: map[string]string{"a": "sum:api_gateway.request.count{*}.as_count()"}, + }, + expectedErrorMessage: "Query is empty. 
API Version v1 only supports using the query parameter in your Analysis Template.", + }, + { + name: "formula/queries with wrong apiVersion", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v1", + Queries: map[string]string{"a": "sum:api_gateway.request.count{*}.as_count()"}, + Formula: "a", + }, + expectedErrorMessage: "Query is empty. API Version v1 only supports using the query parameter in your Analysis Template.", + }, + { + name: "formula without queries", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v1", + Formula: "foo / bar", + Query: "foo", + }, + expectedErrorMessage: "Formula are only valid when queries are set", + }, + { + name: "v1 query with aggregator", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v1", + Query: "foo", + Aggregator: "sum", + }, + expectedErrorMessage: "Aggregator is not supported in v1. Please review the Analysis Template.", + }, + { + name: "More than 1 queries with no formula", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v2", + Query: "", + Queries: map[string]string{"a": "sum:api_gateway.request.count{*}.as_count()", "b": "fish bike"}, + }, + expectedErrorMessage: "When multiple queries are provided you must include a formula.", + }, + { + name: "valid simple query with v2", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v2", + Query: "foo", + }, + expectedErrorMessage: "", + }, + { + name: "valid queries with v2", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v2", + Query: "", + Queries: map[string]string{"a": "sum:api_gateway.request.count{*}.as_count()", "b": "fish bike"}, + Formula: "a + b", + }, + expectedErrorMessage: "", + }, + { + name: "valid queries with v2 and an aggregator", + metric: &v1alpha1.DatadogMetric{ + ApiVersion: "v2", + Query: "", + Queries: map[string]string{"a": "sum:api_gateway.request.count{*}.as_count()", "b": "fish bike"}, + Formula: "a + b", + Aggregator: "avg", + }, + expectedErrorMessage: "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + 
err := validateIncomingProps(test.metric) + if test.expectedErrorMessage != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.expectedErrorMessage) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/metricproviders/graphite/api.go b/metricproviders/graphite/api.go index 06833ac028..bd470de474 100644 --- a/metricproviders/graphite/api.go +++ b/metricproviders/graphite/api.go @@ -87,7 +87,7 @@ type dataPoint struct { } func (gdp *dataPoint) UnmarshalJSON(data []byte) error { - var v []interface{} + var v []any if err := json.Unmarshal(data, &v); err != nil { return err } diff --git a/metricproviders/influxdb/influxdb.go b/metricproviders/influxdb/influxdb.go index f67c8d5f94..4266d2e2f6 100644 --- a/metricproviders/influxdb/influxdb.go +++ b/metricproviders/influxdb/influxdb.go @@ -87,7 +87,7 @@ func (p *Provider) GarbageCollect(run *v1alpha1.AnalysisRun, metric v1alpha1.Met } func (p *Provider) processResponse(metric v1alpha1.Metric, result *influxapi.QueryTableResult) (string, v1alpha1.AnalysisPhase, error) { - var res []interface{} + var res []any if result == nil { return "", v1alpha1.AnalysisPhaseError, fmt.Errorf("no QueryTableResult returned from flux query") } diff --git a/metricproviders/influxdb/mock_test.go b/metricproviders/influxdb/mock_test.go index 3aff084802..558efa9a45 100644 --- a/metricproviders/influxdb/mock_test.go +++ b/metricproviders/influxdb/mock_test.go @@ -23,10 +23,10 @@ func (m mockAPI) QueryRaw(context.Context, string, *domain.Dialect) (string, err panic("Not used") } -func (m mockAPI) QueryRawWithParams(ctx context.Context, query string, dialect *domain.Dialect, params interface{}) (string, error) { +func (m mockAPI) QueryRawWithParams(ctx context.Context, query string, dialect *domain.Dialect, params any) (string, error) { panic("Not used") } -func (m mockAPI) QueryWithParams(ctx context.Context, query string, params interface{}) (*influxapi.QueryTableResult, error) { +func (m mockAPI) QueryWithParams(ctx 
context.Context, query string, params any) (*influxapi.QueryTableResult, error) { panic("Not used") } diff --git a/metricproviders/job/job.go b/metricproviders/job/job.go index 6a6d866a4c..5a85c5c71e 100644 --- a/metricproviders/job/job.go +++ b/metricproviders/job/job.go @@ -11,6 +11,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" batchlisters "k8s.io/client-go/listers/batch/v1" @@ -24,8 +25,12 @@ const ( ProviderType = "Job" // JobNameKey is the measurement's metadata key holding the job name associated with the measurement JobNameKey = "job-name" + // JobNamespaceKey is the measurement's metadata key holding the job namespace associated with the measurement + JobNamespaceKey = "job-namespace" // AnalysisRunNameAnnotationKey is the job's annotation key containing the name of the controller AnalysisRun AnalysisRunNameAnnotationKey = "analysisrun.argoproj.io/name" + // AnalysisRunNamespaceAnnotationKey is the job's annotation key containing the namespace of the controller AnalysisRun + AnalysisRunNamespaceAnnotationKey = "analysisrun.argoproj.io/namespace" // AnalysisRunMetricLabelKey is the job's annotation key containing the name of the associated AnalysisRun metric AnalysisRunMetricAnnotationKey = "analysisrun.argoproj.io/metric-name" // AnalysisRunUIDLabelKey is the job's label key containing the uid of the associated AnalysisRun @@ -38,16 +43,20 @@ var ( ) type JobProvider struct { - kubeclientset kubernetes.Interface - jobLister batchlisters.JobLister - logCtx log.Entry + kubeclientset kubernetes.Interface + jobLister batchlisters.JobLister + logCtx log.Entry + jobNamespace string + customJobKubeconfig bool } -func NewJobProvider(logCtx log.Entry, kubeclientset kubernetes.Interface, jobLister batchlisters.JobLister) *JobProvider { +func NewJobProvider(logCtx log.Entry, kubeclientset kubernetes.Interface, 
jobLister batchlisters.JobLister, jobNS string, customJobKubeconfig bool) *JobProvider { return &JobProvider{ - kubeclientset: kubeclientset, - logCtx: logCtx, - jobLister: jobLister, + kubeclientset: kubeclientset, + logCtx: logCtx, + jobLister: jobLister, + jobNamespace: jobNS, + customJobKubeconfig: customJobKubeconfig, } } @@ -78,7 +87,11 @@ func getJobIDSuffix(run *v1alpha1.AnalysisRun, metricName string) int { return int(res.Count + res.Error + 1) } -func newMetricJob(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) (*batchv1.Job, error) { +func newMetricJob(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, jobNS string, customJobKubeconfig bool) (*batchv1.Job, error) { + ns := run.Namespace + if jobNS != "" { + ns = jobNS + } jobAnnotations := metric.Provider.Job.Metadata.GetAnnotations() jobLabels := metric.Provider.Job.Metadata.GetLabels() if jobAnnotations == nil { @@ -89,12 +102,19 @@ func newMetricJob(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) (*batchv1.J } jobLabels[AnalysisRunUIDLabelKey] = string(run.UID) jobAnnotations[AnalysisRunNameAnnotationKey] = run.Name + jobAnnotations[AnalysisRunNamespaceAnnotationKey] = run.Namespace jobAnnotations[AnalysisRunMetricAnnotationKey] = metric.Name + + ownerRef := []metav1.OwnerReference{*metav1.NewControllerRef(run, analysisRunGVK)} + + if ns != run.Namespace || customJobKubeconfig { + ownerRef = nil + } job := batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: newJobName(run, metric), - Namespace: run.Namespace, - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(run, analysisRunGVK)}, + Namespace: ns, + OwnerReferences: ownerRef, Annotations: jobAnnotations, Labels: jobLabels, }, @@ -110,12 +130,12 @@ func (p *JobProvider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1a StartedAt: &now, Phase: v1alpha1.AnalysisPhaseRunning, } - job, err := newMetricJob(run, metric) + job, err := newMetricJob(run, metric, p.jobNamespace, p.customJobKubeconfig) if err != nil { 
p.logCtx.Errorf("job initialization failed: %v", err) return metricutil.MarkMeasurementError(measurement, err) } - jobIf := p.kubeclientset.BatchV1().Jobs(run.Namespace) + jobIf := p.kubeclientset.BatchV1().Jobs(job.Namespace) createdJob, createErr := jobIf.Create(ctx, job, metav1.CreateOptions{}) if createErr != nil { if !k8serrors.IsAlreadyExists(createErr) { @@ -127,8 +147,17 @@ func (p *JobProvider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1a p.logCtx.Errorf("job create (verify) %s failed: %v", job.Name, createErr) return metricutil.MarkMeasurementError(measurement, createErr) } - controllerRef := metav1.GetControllerOf(existingJob) - if run.UID != controllerRef.UID { + ownerUID := "" + // if custom kubeconfig or different namespace is used owner ref is absent, + // use run uid label to get owner analysis run uid + if p.customJobKubeconfig || job.Namespace != run.Namespace { + ownerUID = job.Labels[AnalysisRunUIDLabelKey] + } else { + controllerRef := metav1.GetControllerOf(existingJob) + ownerUID = string(controllerRef.UID) + } + + if string(run.UID) != ownerUID { // NOTE: we don't bother to check for semantic equality. 
UID is good enough p.logCtx.Errorf("job create (uid check) %s failed: %v", job.Name, createErr) return metricutil.MarkMeasurementError(measurement, createErr) @@ -137,19 +166,20 @@ func (p *JobProvider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1a createdJob = existingJob } measurement.Metadata = map[string]string{ - JobNameKey: createdJob.Name, + JobNameKey: createdJob.Name, + JobNamespaceKey: createdJob.Namespace, } p.logCtx.Infof("job %s/%s created", createdJob.Namespace, createdJob.Name) return measurement } func (p *JobProvider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { - jobName, err := getJobName(measurement) + jobName, err := getJobNamespacedName(measurement, run.Namespace) now := timeutil.MetaNow() if err != nil { return metricutil.MarkMeasurementError(measurement, err) } - job, err := p.jobLister.Jobs(run.Namespace).Get(jobName) + job, err := p.jobLister.Jobs(jobName.Namespace).Get(jobName.Name) if err != nil { return metricutil.MarkMeasurementError(measurement, err) } @@ -170,26 +200,39 @@ func (p *JobProvider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, } func (p *JobProvider) Terminate(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { - jobName, err := getJobName(measurement) + jobName, err := getJobNamespacedName(measurement, run.Namespace) if err != nil { return metricutil.MarkMeasurementError(measurement, err) } - err = p.deleteJob(run.Namespace, jobName) + err = p.deleteJob(jobName.Namespace, jobName.Name) if err != nil { return metricutil.MarkMeasurementError(measurement, err) } now := timeutil.MetaNow() measurement.FinishedAt = &now measurement.Phase = v1alpha1.AnalysisPhaseSuccessful - p.logCtx.Infof("job %s/%s terminated", run.Namespace, jobName) + p.logCtx.Infof("job %s/%s terminated", jobName.Namespace, jobName.Name) return measurement } -func getJobName(measurement 
v1alpha1.Measurement) (string, error) { - if measurement.Metadata != nil && measurement.Metadata[JobNameKey] != "" { - return measurement.Metadata[JobNameKey], nil +func getJobNamespacedName(measurement v1alpha1.Measurement, defaultNS string) (types.NamespacedName, error) { + name := types.NamespacedName{ + Namespace: defaultNS, + Name: "", + } + if measurement.Metadata != nil { + if measurement.Metadata[JobNameKey] != "" { + name.Name = measurement.Metadata[JobNameKey] + } else { + return name, errors.New("job metadata reference missing") + } + if measurement.Metadata[JobNamespaceKey] != "" { + name.Namespace = measurement.Metadata[JobNamespaceKey] + } + } else { + return name, errors.New("job metadata reference missing") } - return "", errors.New("job metadata reference missing") + return name, nil } func (p *JobProvider) deleteJob(namespace, jobName string) error { @@ -220,11 +263,11 @@ func (p *JobProvider) GarbageCollect(run *v1alpha1.AnalysisRun, metric v1alpha1. totalJobs := len(jobs) if totalJobs > limit { for i := 0; i < totalJobs-limit; i++ { - err = p.deleteJob(run.Namespace, jobs[i].Name) + err = p.deleteJob(jobs[i].Namespace, jobs[i].Name) if err != nil { return err } - p.logCtx.Infof("job %s/%s garbage collected", run.Namespace, jobs[i].Name) + p.logCtx.Infof("job %s/%s garbage collected", jobs[i].Namespace, jobs[i].Name) } } return nil diff --git a/metricproviders/job/job_test.go b/metricproviders/job/job_test.go index fd278afec8..80fdb28342 100644 --- a/metricproviders/job/job_test.go +++ b/metricproviders/job/job_test.go @@ -36,7 +36,7 @@ func newTestJobProvider(objects ...runtime.Object) *JobProvider { cancel() jobLister := k8sI.Batch().V1().Jobs().Lister() - return NewJobProvider(*logCtx, kubeclient, jobLister) + return NewJobProvider(*logCtx, kubeclient, jobLister, "", false) } func newRunWithJobMetric() *v1alpha1.AnalysisRun { @@ -193,7 +193,7 @@ func TestRunCreateCollision(t *testing.T) { p := newTestJobProvider() run := newRunWithJobMetric() 
- existingJob, err := newMetricJob(run, run.Spec.Metrics[0]) + existingJob, err := newMetricJob(run, run.Spec.Metrics[0], p.jobNamespace, p.customJobKubeconfig) assert.NoError(t, err) fakeClient := p.kubeclientset.(*k8sfake.Clientset) fakeClient.Tracker().Add(existingJob) diff --git a/metricproviders/kayenta/kayenta.go b/metricproviders/kayenta/kayenta.go index a6b93f40e6..b623aa7730 100644 --- a/metricproviders/kayenta/kayenta.go +++ b/metricproviders/kayenta/kayenta.go @@ -67,26 +67,20 @@ func (p *Provider) GetMetadata(metric v1alpha1.Metric) map[string]string { } func getCanaryConfigId(metric v1alpha1.Metric, p *Provider) (string, error) { - configIdLookupURL := fmt.Sprintf(configIdLookupURLFormat, metric.Provider.Kayenta.Address, metric.Provider.Kayenta.Application, metric.Provider.Kayenta.StorageAccountName) response, err := p.client.Get(configIdLookupURL) - if err != nil || response.Body == nil || response.StatusCode != 200 { - if err == nil { - err = errors.New("Invalid Response") - } + if err != nil { return "", err } + defer response.Body.Close() - data, err := io.ReadAll(response.Body) - if err != nil { - return "", err + if response.StatusCode != 200 { + return "", fmt.Errorf("Invalid Response: HTTP %d", response.StatusCode) } var cc []canaryConfig - - err = json.Unmarshal(data, &cc) - if err != nil { + if err := json.NewDecoder(response.Body).Decode(&cc); err != nil { return "", err } @@ -96,7 +90,7 @@ func getCanaryConfigId(metric v1alpha1.Metric, p *Provider) (string, error) { } } - return "", err + return "", errors.New("Canary config not found") } // Run queries kayentd for the metric @@ -146,7 +140,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph if err != nil { return metricutil.MarkMeasurementError(newMeasurement, err) } - var dat map[string]interface{} + var dat map[string]any if err := json.Unmarshal(data, &dat); err != nil { return metricutil.MarkMeasurementError(newMeasurement, err) } @@ -185,7 +179,7 @@ 
func (p *Provider) Resume(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric, mea return metricutil.MarkMeasurementError(measurement, err) } - patch := make(map[string]interface{}) + patch := make(map[string]any) err = json.Unmarshal(data, &patch) if err != nil { diff --git a/metricproviders/kayenta/kayenta_test.go b/metricproviders/kayenta/kayenta_test.go index abfed0f7c1..548f868bfb 100644 --- a/metricproviders/kayenta/kayenta_test.go +++ b/metricproviders/kayenta/kayenta_test.go @@ -157,12 +157,12 @@ func TestRunSuccessfully(t *testing.T) { if err != nil { panic(err) } - bodyI := map[string]interface{}{} + bodyI := map[string]any{} err = json.Unmarshal(body, &bodyI) if err != nil { panic(err) } - expectedBodyI := map[string]interface{}{} + expectedBodyI := map[string]any{} err = json.Unmarshal([]byte(expectedBody), &expectedBodyI) if err != nil { panic(err) diff --git a/metricproviders/metricproviders.go b/metricproviders/metricproviders.go index f73ff15e88..916fc85aa2 100644 --- a/metricproviders/metricproviders.go +++ b/metricproviders/metricproviders.go @@ -2,10 +2,13 @@ package metricproviders import ( "fmt" + "os" "github.com/argoproj/argo-rollouts/metric" "github.com/argoproj/argo-rollouts/metricproviders/influxdb" "github.com/argoproj/argo-rollouts/metricproviders/skywalking" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "github.com/argoproj/argo-rollouts/metricproviders/cloudwatch" "github.com/argoproj/argo-rollouts/metricproviders/datadog" @@ -26,6 +29,12 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" ) +const ( + InclusterKubeconfig = "in-cluster" + AnalysisJobKubeconfigEnv = "ARGO_ROLLOUTS_ANALYSIS_JOB_KUBECONFIG" + AnalysisJobNamespaceEnv = "ARGO_ROLLOUTS_ANALYSIS_JOB_NAMESPACE" +) + type ProviderFactory struct { KubeClient kubernetes.Interface JobLister batchlisters.JobLister @@ -43,19 +52,27 @@ func (f *ProviderFactory) NewProvider(logCtx log.Entry, metric v1alpha1.Metric) } return 
prometheus.NewPrometheusProvider(api, logCtx, metric) case job.ProviderType: - return job.NewJobProvider(logCtx, f.KubeClient, f.JobLister), nil + kubeClient, customKubeconfig, err := GetAnalysisJobClientset(f.KubeClient) + if err != nil { + return nil, err + } + + return job.NewJobProvider(logCtx, kubeClient, f.JobLister, GetAnalysisJobNamespace(), customKubeconfig), nil case kayenta.ProviderType: c := kayenta.NewHttpClient() return kayenta.NewKayentaProvider(logCtx, c), nil case webmetric.ProviderType: - c := webmetric.NewWebMetricHttpClient(metric) + c, err := webmetric.NewWebMetricHttpClient(metric) + if err != nil { + return nil, err + } p, err := webmetric.NewWebMetricJsonParser(metric) if err != nil { return nil, err } return webmetric.NewWebMetricProvider(logCtx, c, p), nil case datadog.ProviderType: - return datadog.NewDatadogProvider(logCtx, f.KubeClient) + return datadog.NewDatadogProvider(logCtx, f.KubeClient, metric) case wavefront.ProviderType: client, err := wavefront.NewWavefrontAPI(metric, f.KubeClient) if err != nil { @@ -132,3 +149,32 @@ func Type(metric v1alpha1.Metric) string { return "Unknown Provider" } + +// GetAnalysisJobClientset returns kubernetes clientset for executing the analysis job metric, +// if the AnalysisJobKubeconfigEnv is set to InclusterKubeconfig, it will return the incluster client +// else if it's set to a kubeconfig file it will return the clientset corresponding to the kubeconfig file. 
+// If empty it returns the provided defaultClientset +func GetAnalysisJobClientset(defaultClientset kubernetes.Interface) (kubernetes.Interface, bool, error) { + customJobKubeconfig := os.Getenv(AnalysisJobKubeconfigEnv) + if customJobKubeconfig != "" { + var ( + cfg *rest.Config + err error + ) + if customJobKubeconfig == InclusterKubeconfig { + cfg, err = rest.InClusterConfig() + } else { + cfg, err = clientcmd.BuildConfigFromFlags("", customJobKubeconfig) + } + if err != nil { + return nil, true, err + } + clientSet, err := kubernetes.NewForConfig(cfg) + return clientSet, true, err + } + return defaultClientset, false, nil +} + +func GetAnalysisJobNamespace() string { + return os.Getenv(AnalysisJobNamespaceEnv) +} diff --git a/metricproviders/newrelic/newrelic.go b/metricproviders/newrelic/newrelic.go index eca410b6e7..0c971e5c60 100644 --- a/metricproviders/newrelic/newrelic.go +++ b/metricproviders/newrelic/newrelic.go @@ -78,7 +78,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph return newMeasurement } -func toJSONString(v interface{}) (string, error) { +func toJSONString(v any) (string, error) { b, err := json.Marshal(v) if err != nil { return "", err diff --git a/metricproviders/newrelic/newrelic_test.go b/metricproviders/newrelic/newrelic_test.go index b483d96872..8f49049815 100644 --- a/metricproviders/newrelic/newrelic_test.go +++ b/metricproviders/newrelic/newrelic_test.go @@ -31,7 +31,7 @@ func TestType(t *testing.T) { func TestRunSuccessfully(t *testing.T) { e := log.Entry{} mock := &mockAPI{ - response: []nrdb.NRDBResult{map[string]interface{}{"count": 10}}, + response: []nrdb.NRDBResult{map[string]any{"count": 10}}, } p := NewNewRelicProvider(mock, e) metric := v1alpha1.Metric{ @@ -58,9 +58,9 @@ func TestRunWithTimeseries(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ response: []nrdb.NRDBResult{ - map[string]interface{}{"count": 10}, - map[string]interface{}{"count": 20}, - 
map[string]interface{}{"count": 30}}, + map[string]any{"count": 10}, + map[string]any{"count": 20}, + map[string]any{"count": 30}}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -86,7 +86,7 @@ func TestRunWithTimeseries(t *testing.T) { func TestRunWithFacet(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ - response: []nrdb.NRDBResult{map[string]interface{}{"count": 10, "average.duration": 12.34}}, + response: []nrdb.NRDBResult{map[string]any{"count": 10, "average.duration": 12.34}}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -112,7 +112,7 @@ func TestRunWithFacet(t *testing.T) { func TestRunWithMultipleSelectTerms(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ - response: []nrdb.NRDBResult{map[string]interface{}{"count": 10}}, + response: []nrdb.NRDBResult{map[string]any{"count": 10}}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -139,7 +139,7 @@ func TestRunWithEmptyResult(t *testing.T) { e := log.NewEntry(log.New()) expectedErr := fmt.Errorf("no results returned from NRQL query") mock := &mockAPI{ - response: []nrdb.NRDBResult{make(map[string]interface{})}, + response: []nrdb.NRDBResult{make(map[string]any)}, } p := NewNewRelicProvider(mock, *e) metric := v1alpha1.Metric{ @@ -245,7 +245,7 @@ func TestRunWithInvalidJSON(t *testing.T) { } t.Run("with a single result map", func(t *testing.T) { mock := &mockAPI{ - response: []nrdb.NRDBResult{map[string]interface{}{"func": func() {}}}, + response: []nrdb.NRDBResult{map[string]any{"func": func() {}}}, } p := NewNewRelicProvider(mock, *e) measurement := p.Run(newAnalysisRun(), metric) @@ -258,7 +258,7 @@ func TestRunWithInvalidJSON(t *testing.T) { t.Run("with multiple results", func(t *testing.T) { // cover branch where results slice is longer than 1 mock := &mockAPI{ - response: []nrdb.NRDBResult{map[string]interface{}{"key": "value"}, map[string]interface{}{"func": func() {}}}, + response: 
[]nrdb.NRDBResult{map[string]any{"key": "value"}, map[string]any{"func": func() {}}}, } p := NewNewRelicProvider(mock, *e) measurement := p.Run(newAnalysisRun(), metric) diff --git a/metricproviders/plugin/client/client.go b/metricproviders/plugin/client/client.go index 97e76e0b0f..9fc82ed244 100644 --- a/metricproviders/plugin/client/client.go +++ b/metricproviders/plugin/client/client.go @@ -54,7 +54,7 @@ func (m *metricPlugin) startPluginSystem(metric v1alpha1.Metric) (rpc.MetricProv // There should only ever be one plugin defined in metric.Provider.Plugin per analysis template this gets checked // during validation for pluginName := range metric.Provider.Plugin { - pluginPath, err := plugin.GetPluginLocation(pluginName) + pluginPath, args, err := plugin.GetPluginInfo(pluginName) if err != nil { return nil, fmt.Errorf("unable to find plugin (%s): %w", pluginName, err) } @@ -64,7 +64,7 @@ func (m *metricPlugin) startPluginSystem(metric v1alpha1.Metric) (rpc.MetricProv m.pluginClient[pluginName] = goPlugin.NewClient(&goPlugin.ClientConfig{ HandshakeConfig: handshakeConfig, Plugins: pluginMap, - Cmd: exec.Command(pluginPath), + Cmd: exec.Command(pluginPath, args...), Managed: true, }) diff --git a/metricproviders/plugin/rpc/rpc.go b/metricproviders/plugin/rpc/rpc.go index 7f703b5ec9..a709693976 100644 --- a/metricproviders/plugin/rpc/rpc.go +++ b/metricproviders/plugin/rpc/rpc.go @@ -56,7 +56,7 @@ type MetricsPluginRPC struct{ client *rpc.Client } // server side function. 
func (g *MetricsPluginRPC) InitPlugin() types.RpcError { var resp types.RpcError - err := g.client.Call("Plugin.InitPlugin", new(interface{}), &resp) + err := g.client.Call("Plugin.InitPlugin", new(any), &resp) if err != nil { return types.RpcError{ErrorString: fmt.Sprintf("InitPlugin rpc call error: %s", err)} } @@ -66,7 +66,7 @@ func (g *MetricsPluginRPC) InitPlugin() types.RpcError { // Run is the client side function that is wrapped by a local provider this makes an rpc call to the server side function. func (g *MetricsPluginRPC) Run(analysisRun *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alpha1.Measurement { var resp v1alpha1.Measurement - var args interface{} = RunArgs{ + var args any = RunArgs{ AnalysisRun: analysisRun, Metric: metric, } @@ -83,7 +83,7 @@ func (g *MetricsPluginRPC) Run(analysisRun *v1alpha1.AnalysisRun, metric v1alpha // Resume is the client side function that is wrapped by a local provider this makes an rpc call to the server side function. func (g *MetricsPluginRPC) Resume(analysisRun *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { var resp v1alpha1.Measurement - var args interface{} = TerminateAndResumeArgs{ + var args any = TerminateAndResumeArgs{ AnalysisRun: analysisRun, Metric: metric, Measurement: measurement, @@ -101,7 +101,7 @@ func (g *MetricsPluginRPC) Resume(analysisRun *v1alpha1.AnalysisRun, metric v1al // Terminate is the client side function that is wrapped by a local provider this makes an rpc call to the server side function. 
func (g *MetricsPluginRPC) Terminate(analysisRun *v1alpha1.AnalysisRun, metric v1alpha1.Metric, measurement v1alpha1.Measurement) v1alpha1.Measurement { var resp v1alpha1.Measurement - var args interface{} = TerminateAndResumeArgs{ + var args any = TerminateAndResumeArgs{ AnalysisRun: analysisRun, Metric: metric, Measurement: measurement, @@ -119,7 +119,7 @@ func (g *MetricsPluginRPC) Terminate(analysisRun *v1alpha1.AnalysisRun, metric v // GarbageCollect is the client side function that is wrapped by a local provider this makes an rpc call to the server side function. func (g *MetricsPluginRPC) GarbageCollect(analysisRun *v1alpha1.AnalysisRun, metric v1alpha1.Metric, limit int) types.RpcError { var resp types.RpcError - var args interface{} = GarbageCollectArgs{ + var args any = GarbageCollectArgs{ AnalysisRun: analysisRun, Metric: metric, Limit: limit, @@ -134,7 +134,7 @@ func (g *MetricsPluginRPC) GarbageCollect(analysisRun *v1alpha1.AnalysisRun, met // Type is the client side function that is wrapped by a local provider this makes an rpc call to the server side function. func (g *MetricsPluginRPC) Type() string { var resp string - err := g.client.Call("Plugin.Type", new(interface{}), &resp) + err := g.client.Call("Plugin.Type", new(any), &resp) if err != nil { return fmt.Sprintf("Type rpc call error: %s", err) } @@ -144,7 +144,7 @@ func (g *MetricsPluginRPC) Type() string { // GetMetadata is the client side function that is wrapped by a local provider this makes an rpc call to the server side function. 
func (g *MetricsPluginRPC) GetMetadata(metric v1alpha1.Metric) map[string]string { var resp map[string]string - var args interface{} = GetMetadataArgs{ + var args any = GetMetadataArgs{ Metric: metric, } err := g.client.Call("Plugin.GetMetadata", &args, &resp) @@ -166,14 +166,14 @@ type MetricsRPCServer struct { // InitPlugin is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. -func (s *MetricsRPCServer) InitPlugin(args interface{}, resp *types.RpcError) error { +func (s *MetricsRPCServer) InitPlugin(args any, resp *types.RpcError) error { *resp = s.Impl.InitPlugin() return nil } // Run is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. -func (s *MetricsRPCServer) Run(args interface{}, resp *v1alpha1.Measurement) error { +func (s *MetricsRPCServer) Run(args any, resp *v1alpha1.Measurement) error { runArgs, ok := args.(*RunArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -184,7 +184,7 @@ func (s *MetricsRPCServer) Run(args interface{}, resp *v1alpha1.Measurement) err // Resume is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. -func (s *MetricsRPCServer) Resume(args interface{}, resp *v1alpha1.Measurement) error { +func (s *MetricsRPCServer) Resume(args any, resp *v1alpha1.Measurement) error { resumeArgs, ok := args.(*TerminateAndResumeArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -195,7 +195,7 @@ func (s *MetricsRPCServer) Resume(args interface{}, resp *v1alpha1.Measurement) // Terminate is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. 
-func (s *MetricsRPCServer) Terminate(args interface{}, resp *v1alpha1.Measurement) error { +func (s *MetricsRPCServer) Terminate(args any, resp *v1alpha1.Measurement) error { resumeArgs, ok := args.(*TerminateAndResumeArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -206,7 +206,7 @@ func (s *MetricsRPCServer) Terminate(args interface{}, resp *v1alpha1.Measuremen // GarbageCollect is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. -func (s *MetricsRPCServer) GarbageCollect(args interface{}, resp *types.RpcError) error { +func (s *MetricsRPCServer) GarbageCollect(args any, resp *types.RpcError) error { gcArgs, ok := args.(*GarbageCollectArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -217,14 +217,14 @@ func (s *MetricsRPCServer) GarbageCollect(args interface{}, resp *types.RpcError // Type is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. -func (s *MetricsRPCServer) Type(args interface{}, resp *string) error { +func (s *MetricsRPCServer) Type(args any, resp *string) error { *resp = s.Impl.Type() return nil } // GetMetadata is the receiving end of the RPC call running in the plugin executable process (the server), and it calls the // implementation of the plugin. 
-func (s *MetricsRPCServer) GetMetadata(args interface{}, resp *map[string]string) error { +func (s *MetricsRPCServer) GetMetadata(args any, resp *map[string]string) error { getMetadataArgs, ok := args.(*GetMetadataArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -248,10 +248,10 @@ type RpcMetricProviderPlugin struct { Impl MetricProviderPlugin } -func (p *RpcMetricProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { +func (p *RpcMetricProviderPlugin) Server(*plugin.MuxBroker) (any, error) { return &MetricsRPCServer{Impl: p.Impl}, nil } -func (RpcMetricProviderPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { +func (RpcMetricProviderPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (any, error) { return &MetricsPluginRPC{client: c}, nil } diff --git a/metricproviders/prometheus/prometheus.go b/metricproviders/prometheus/prometheus.go index 6d1d521fb0..7473693525 100644 --- a/metricproviders/prometheus/prometheus.go +++ b/metricproviders/prometheus/prometheus.go @@ -17,6 +17,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" log "github.com/sirupsen/logrus" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/evaluate" @@ -164,6 +166,7 @@ func NewPrometheusProvider(api v1.API, logCtx log.Entry, metric v1alpha1.Metric) // NewPrometheusAPI generates a prometheus API from the metric configuration func NewPrometheusAPI(metric v1alpha1.Metric) (v1.API, error) { envValuesByKey := make(map[string]string) + if value, ok := os.LookupEnv(fmt.Sprintf("%s", EnvVarArgoRolloutsPrometheusAddress)); ok { envValuesByKey[EnvVarArgoRolloutsPrometheusAddress] = value log.Debugf("ARGO_ROLLOUTS_PROMETHEUS_ADDRESS: %v", envValuesByKey[EnvVarArgoRolloutsPrometheusAddress]) @@ -203,25 +206,42 @@ func NewPrometheusAPI(metric v1alpha1.Metric) (v1.API, error) { } } - prometheusApiConfig := 
api.Config{ - Address: metric.Provider.Prometheus.Address, - RoundTripper: roundTripper, - } - //Check if using Amazon Managed Prometheus if true build sigv4 client - if strings.Contains(metric.Provider.Prometheus.Address, "aps-workspaces") { + if strings.Contains(metric.Provider.Prometheus.Address, "aps-workspaces") && (v1alpha1.Sigv4Config{}) != metric.Provider.Prometheus.Authentication.Sigv4 { cfg := sigv4.SigV4Config{ Region: metric.Provider.Prometheus.Authentication.Sigv4.Region, Profile: metric.Provider.Prometheus.Authentication.Sigv4.Profile, RoleARN: metric.Provider.Prometheus.Authentication.Sigv4.RoleARN, } - var next http.RoundTripper - sigv4RoundTripper, err := sigv4.NewSigV4RoundTripper(&cfg, next) + sigv4RoundTripper, err := sigv4.NewSigV4RoundTripper(&cfg, roundTripper) if err != nil { log.Errorf("Error creating SigV4 RoundTripper: %v", err) return nil, err } - prometheusApiConfig.RoundTripper = sigv4RoundTripper + roundTripper = sigv4RoundTripper + } + + httpClient := &http.Client{ + Transport: roundTripper, + } + + if metric.Provider.Prometheus.Authentication.OAuth2.TokenURL != "" { + if metric.Provider.Prometheus.Authentication.OAuth2.ClientID == "" || metric.Provider.Prometheus.Authentication.OAuth2.ClientSecret == "" { + return nil, errors.New("missing mandatory parameter in metric for OAuth2 setup") + } + oauthCfg := &clientcredentials.Config{ + ClientID: metric.Provider.Prometheus.Authentication.OAuth2.ClientID, + ClientSecret: metric.Provider.Prometheus.Authentication.OAuth2.ClientSecret, + TokenURL: metric.Provider.Prometheus.Authentication.OAuth2.TokenURL, + Scopes: metric.Provider.Prometheus.Authentication.OAuth2.Scopes, + } + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpClient) + httpClient = oauthCfg.Client(ctx) + } + + prometheusApiConfig := api.Config{ + Address: metric.Provider.Prometheus.Address, + Client: httpClient, } client, err := api.NewClient(prometheusApiConfig) @@ -229,6 +249,7 @@ func 
NewPrometheusAPI(metric v1alpha1.Metric) (v1.API, error) { log.Errorf("Error in getting prometheus client: %v", err) return nil, err } + return v1.NewAPI(client), nil } diff --git a/metricproviders/prometheus/prometheus_test.go b/metricproviders/prometheus/prometheus_test.go index 8b077930a5..f54d6d6c61 100644 --- a/metricproviders/prometheus/prometheus_test.go +++ b/metricproviders/prometheus/prometheus_test.go @@ -3,7 +3,10 @@ package prometheus import ( "fmt" "math" + "net/http" + "net/http/httptest" "os" + "strings" "testing" "time" @@ -15,6 +18,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + AccessToken = "MyAccessToken" +) + +type OAuthResponse struct { + TokenType string `json:"token_type,omitempty"` + AccessToken string `json:"access_token,omitempty"` + Expiry string `json:"expires_in,omitempty"` +} + func newScalar(f float64) model.Value { return &model.Scalar{ Value: model.SampleValue(f), @@ -514,3 +527,209 @@ func TestNewPrometheusNegativeTimeout(t *testing.T) { assert.NotNil(t, err) assert.Nil(t, p) } + +func TestRunSuccessfulWithOAuth(t *testing.T) { + e := log.Entry{} + promServer := mockPromServer(AccessToken) + oAuthServer := mockOAuthServer(AccessToken) + defer promServer.Close() + defer oAuthServer.Close() + + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[0] != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: promServer.URL, + Query: "test", + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: oAuthServer.URL + "/ok", + ClientID: "someId", + ClientSecret: "mySecret", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + } + api, err := NewPrometheusAPI(metric) + assert.NoError(t, err) + p, err := NewPrometheusProvider(api, e, metric) + + measurement := p.Run(newAnalysisRun(), metric) + assert.NotNil(t, measurement.StartedAt) + assert.NoError(t, err) + 
assert.Equal(t, "[10]", measurement.Value) + assert.NotNil(t, measurement.FinishedAt) + assert.Equal(t, v1alpha1.AnalysisPhaseSuccessful, measurement.Phase) +} + +func TestNewPromApiErrorWithIncompleteOAuthParams(t *testing.T) { + + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[0] != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: "http://promurl", + Query: "test", + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: "http://tokenurl", + ClientSecret: "mySecret", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + } + _, err := NewPrometheusAPI(metric) + assert.Error(t, err) + + metric = v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[0] != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: "http://promurl", + Query: "test", + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: "http://tokenurl", + ClientID: "someId", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + } + _, err = NewPrometheusAPI(metric) + assert.Error(t, err) + + metric = v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[0] != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: "http://promurl", + Query: "test", + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: "http://tokenurl", + ClientID: "someId", + ClientSecret: "mySecret", + }, + }, + }, + }, + } + _, err = NewPrometheusAPI(metric) + // scopes are optional + assert.NoError(t, err) +} + +func TestRunErrorOAuthFailure(t *testing.T) { + e := log.Entry{} + promServer := mockPromServer(AccessToken) + oAuthServer := mockOAuthServer(AccessToken) + defer promServer.Close() + defer 
oAuthServer.Close() + + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result[0] == 10", + FailureCondition: "result[0] != 10", + Provider: v1alpha1.MetricProvider{ + Prometheus: &v1alpha1.PrometheusMetric{ + Address: promServer.URL, + Query: "test", + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: oAuthServer.URL + "/ko", + ClientID: "someId", + ClientSecret: "mySecret", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + } + api, err := NewPrometheusAPI(metric) + assert.NoError(t, err) + p, err := NewPrometheusProvider(api, e, metric) + + measurement := p.Run(newAnalysisRun(), metric) + assert.NoError(t, err) + assert.Equal(t, v1alpha1.AnalysisPhaseError, measurement.Phase) +} + +func mockOAuthServer(accessToken string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.StandardLogger().Infof("Received oauth query") + switch strings.TrimSpace(r.URL.Path) { + case "/ok": + mockOAuthOKResponse(w, r, accessToken) + case "/ko": + mockOAuthKOResponse(w, r) + default: + http.NotFoundHandler().ServeHTTP(w, r) + } + })) +} + +func mockOAuthOKResponse(w http.ResponseWriter, r *http.Request, accessToken string) { + + oAuthResponse := fmt.Sprintf(`{"token_type":"Bearer","expires_in":3599,"access_token":"%s"}`, accessToken) + + sc := http.StatusOK + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(sc) + w.Write([]byte(oAuthResponse)) +} + +func mockOAuthKOResponse(w http.ResponseWriter, r *http.Request) { + sc := http.StatusUnauthorized + w.WriteHeader(sc) +} + +func mockPromServer(expectedAuthorizationHeader string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + log.StandardLogger().Infof("Received prom query") + + authorizationHeader := r.Header.Get("Authorization") + // Reject call if we don't find the expected oauth token + if 
expectedAuthorizationHeader != "" && ("Bearer "+expectedAuthorizationHeader) != authorizationHeader { + + log.StandardLogger().Infof("Authorization header not as expected, rejecting") + sc := http.StatusUnauthorized + w.WriteHeader(sc) + + } else { + log.StandardLogger().Infof("Authorization header as expected, continuing") + promResponse := `{"data":{"result":[{"metric":{"__name__":"myMetric"},"value":[0, "10"]}],"resultType":"vector"},"status":"success"}` + + sc := http.StatusOK + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(sc) + w.Write([]byte(promResponse)) + } + })) +} diff --git a/metricproviders/skywalking/mock_test.go b/metricproviders/skywalking/mock_test.go index 70a5b67867..147b172294 100644 --- a/metricproviders/skywalking/mock_test.go +++ b/metricproviders/skywalking/mock_test.go @@ -2,10 +2,10 @@ package skywalking type mockAPI struct { err error - results interface{} + results any } -func (m mockAPI) Query(query string) (interface{}, error) { +func (m mockAPI) Query(query string) (any, error) { if m.err != nil { return m.results, m.err } diff --git a/metricproviders/skywalking/skywalking.go b/metricproviders/skywalking/skywalking.go index eb343b1bdd..4b352100f9 100644 --- a/metricproviders/skywalking/skywalking.go +++ b/metricproviders/skywalking/skywalking.go @@ -29,7 +29,7 @@ const ( ) type SkyWalkingClientAPI interface { - Query(query string) (interface{}, error) + Query(query string) (any, error) } type SkyWalkingClient struct { @@ -38,7 +38,7 @@ type SkyWalkingClient struct { } // Query executes a GraphQL query against the given SkyWalking backend -func (n SkyWalkingClient) Query(query string) (interface{}, error) { +func (n SkyWalkingClient) Query(query string) (any, error) { ctx, cancel := context.WithTimeout(context.Background(), defaultQueryTimeout) defer cancel() @@ -48,7 +48,7 @@ func (n SkyWalkingClient) Query(query string) (interface{}, error) { End: time.Now().Format("2006-01-02 1504"), Step: "MINUTE", }) - var 
results interface{} + var results any err := n.Run(ctx, req, &results) return results, err } @@ -82,7 +82,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph return newMeasurement } -func toJSONString(v interface{}) (string, error) { +func toJSONString(v any) (string, error) { b, err := json.Marshal(v) if err != nil { return "", err @@ -90,7 +90,7 @@ func toJSONString(v interface{}) (string, error) { return string(b), nil } -func (p *Provider) processResponse(metric v1alpha1.Metric, result interface{}) (string, v1alpha1.AnalysisPhase, error) { +func (p *Provider) processResponse(metric v1alpha1.Metric, result any) (string, v1alpha1.AnalysisPhase, error) { if result == nil { return "", v1alpha1.AnalysisPhaseFailed, fmt.Errorf("no results returned from SkyWalking query") } diff --git a/metricproviders/skywalking/skywalking_test.go b/metricproviders/skywalking/skywalking_test.go index 47c1a60dc9..434def389c 100644 --- a/metricproviders/skywalking/skywalking_test.go +++ b/metricproviders/skywalking/skywalking_test.go @@ -25,7 +25,7 @@ func TestType(t *testing.T) { func TestRunSuccessfully(t *testing.T) { e := log.Entry{} mock := &mockAPI{ - results: map[string]interface{}{"count": 10}, + results: map[string]any{"count": 10}, } p := NewSkyWalkingProvider(mock, e) metric := v1alpha1.Metric{ @@ -51,10 +51,10 @@ func TestRunSuccessfully(t *testing.T) { func TestRunWithTimeseries(t *testing.T) { e := log.NewEntry(log.New()) mock := &mockAPI{ - results: []interface{}{ - map[string]interface{}{"count": 10}, - map[string]interface{}{"count": 20}, - map[string]interface{}{"count": 30}}, + results: []any{ + map[string]any{"count": 10}, + map[string]any{"count": 20}, + map[string]any{"count": 30}}, } p := NewSkyWalkingProvider(mock, *e) metric := v1alpha1.Metric{ @@ -107,7 +107,7 @@ func TestRunWithResolveArgsError(t *testing.T) { expectedErr := fmt.Errorf("failed to resolve {{args.var}}") mock := &mockAPI{ err: expectedErr, - results: 
map[string]interface{}{"A": "B"}, + results: map[string]any{"A": "B"}, } p := NewSkyWalkingProvider(mock, *e) metric := v1alpha1.Metric{ diff --git a/metricproviders/webmetric/webmetric.go b/metricproviders/webmetric/webmetric.go index e32531f92a..6561e72290 100644 --- a/metricproviders/webmetric/webmetric.go +++ b/metricproviders/webmetric/webmetric.go @@ -2,6 +2,7 @@ package webmetric import ( "bytes" + "context" "crypto/tls" "encoding/json" "errors" @@ -13,6 +14,8 @@ import ( "time" log "github.com/sirupsen/logrus" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" "k8s.io/client-go/util/jsonpath" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -119,7 +122,7 @@ func (p *Provider) Run(run *v1alpha1.AnalysisRun, metric v1alpha1.Metric) v1alph } func (p *Provider) parseResponse(metric v1alpha1.Metric, response *http.Response) (string, v1alpha1.AnalysisPhase, error) { - var data interface{} + var data any bodyBytes, err := io.ReadAll(response.Body) if err != nil { @@ -145,7 +148,7 @@ func (p *Provider) parseResponse(metric v1alpha1.Metric, response *http.Response return valString, status, err } -func getValue(fullResults [][]reflect.Value) (interface{}, string, error) { +func getValue(fullResults [][]reflect.Value) (any, string, error) { for _, results := range fullResults { for _, r := range results { val := r.Interface() @@ -173,8 +176,9 @@ func (p *Provider) GarbageCollect(run *v1alpha1.AnalysisRun, metric v1alpha1.Met return nil } -func NewWebMetricHttpClient(metric v1alpha1.Metric) *http.Client { +func NewWebMetricHttpClient(metric v1alpha1.Metric) (*http.Client, error) { var timeout time.Duration + var oauthCfg clientcredentials.Config // Using a default timeout of 10 seconds if metric.Provider.Web.TimeoutSeconds <= 0 { @@ -192,7 +196,19 @@ func NewWebMetricHttpClient(metric v1alpha1.Metric) *http.Client { } c.Transport = tr } - return c + if metric.Provider.Web.Authentication.OAuth2.TokenURL != "" { + if 
metric.Provider.Web.Authentication.OAuth2.ClientID == "" || metric.Provider.Web.Authentication.OAuth2.ClientSecret == "" { + return nil, errors.New("missing mandatory parameter in metric for OAuth2 setup") + } + oauthCfg = clientcredentials.Config{ + ClientID: metric.Provider.Web.Authentication.OAuth2.ClientID, + ClientSecret: metric.Provider.Web.Authentication.OAuth2.ClientSecret, + TokenURL: metric.Provider.Web.Authentication.OAuth2.TokenURL, + Scopes: metric.Provider.Web.Authentication.OAuth2.Scopes, + } + return oauthCfg.Client(context.WithValue(context.Background(), oauth2.HTTPClient, c)), nil + } + return c, nil } func NewWebMetricJsonParser(metric v1alpha1.Metric) (*jsonpath.JSONPath, error) { diff --git a/metricproviders/webmetric/webmetric_test.go b/metricproviders/webmetric/webmetric_test.go index 5ec0422ce1..1f8556a0eb 100644 --- a/metricproviders/webmetric/webmetric_test.go +++ b/metricproviders/webmetric/webmetric_test.go @@ -3,9 +3,11 @@ package webmetric import ( "bytes" "encoding/json" + "fmt" "io" "net/http" "net/http/httptest" + "strings" "testing" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -13,19 +15,30 @@ import ( "github.com/stretchr/testify/assert" ) +const ( + AccessToken = "MyAccessToken" +) + func TestRunSuite(t *testing.T) { + + // Start OAuth server + oAuthServer := mockOAuthServer(AccessToken) + defer oAuthServer.Close() + // Test Cases var tests = []struct { - webServerStatus int - webServerResponse string - metric v1alpha1.Metric - expectedMethod string - expectedBody string - expectedValue string - expectedPhase v1alpha1.AnalysisPhase - expectedErrorMessage string - expectedJsonBody string + webServerStatus int + webServerResponse string + metric v1alpha1.Metric + expectedMethod string + expectedBody string + expectedValue string + expectedPhase v1alpha1.AnalysisPhase + expectedErrorMessage string + expectedJsonBody string + expectedAuthorizationHeader string }{ + // 
When_noJSONPathSpecified_And_MatchesConditions_Then_Succeed { webServerStatus: 200, @@ -651,6 +664,63 @@ func TestRunSuite(t *testing.T) { expectedValue: "use either Body or JSONBody; both cannot exists for WebMetric payload", expectedPhase: v1alpha1.AnalysisPhaseError, }, + // When_usingOAuth2_Then_Succeed + { + webServerStatus: 200, + webServerResponse: `{"a": 1, "b": true, "c": [1, 2, 3, 4], "d": null}`, + metric: v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result.a > 0 && result.b && all(result.c, {# < 5}) && result.d == nil", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{ + // URL: server.URL, + Headers: []v1alpha1.WebMetricHeader{{Key: "key", Value: "value"}}, + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: oAuthServer.URL + "/ok", + ClientID: "myClientID", + ClientSecret: "mySecret", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + }, + expectedAuthorizationHeader: AccessToken, + expectedValue: `{"a":1,"b":true,"c":[1,2,3,4],"d":null}`, + expectedPhase: v1alpha1.AnalysisPhaseSuccessful, + }, + // When_RejectedByOAuthServer_Then_Failure + { + webServerResponse: `Missing OAuth2 token`, + metric: v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result.a > 0 && result.b && all(result.c, {# < 5}) && result.d == nil", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{ + // URL: server.URL, + Headers: []v1alpha1.WebMetricHeader{{Key: "key", Value: "value"}}, + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: oAuthServer.URL + "/ko", + ClientID: "myClientID", + ClientSecret: "mySecret", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + }, + expectedAuthorizationHeader: AccessToken, + expectedErrorMessage: `oauth2: cannot fetch token: 401 Unauthorized`, + expectedPhase: v1alpha1.AnalysisPhaseError, + }, } // Run @@ -658,6 +728,18 @@ func TestRunSuite(t *testing.T) { for _, test 
:= range tests { // Server setup with response server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + + authorizationHeader := req.Header.Get("Authorization") + // Reject call if we don't find the expected oauth token + if test.expectedAuthorizationHeader != "" && ("Bearer "+test.expectedAuthorizationHeader) != authorizationHeader { + + log.StandardLogger().Infof("Authorization header not as expected, rejecting") + sc := http.StatusUnauthorized + rw.WriteHeader(sc) + io.WriteString(rw, test.webServerResponse) + return + } + if test.expectedMethod != "" { assert.Equal(t, test.expectedMethod, req.Method) } @@ -694,7 +776,9 @@ func TestRunSuite(t *testing.T) { jsonparser, err := NewWebMetricJsonParser(test.metric) assert.NoError(t, err) - provider := NewWebMetricProvider(*logCtx, server.Client(), jsonparser) + client, err := NewWebMetricHttpClient(test.metric) + assert.NoError(t, err) + provider := NewWebMetricProvider(*logCtx, client, jsonparser) metricsMetadata := provider.GetMetadata(test.metric) assert.Nil(t, metricsMetadata) @@ -725,6 +809,109 @@ func TestRunSuite(t *testing.T) { } } +func TestNewPromApiErrorWithIncompleteOAuthParams(t *testing.T) { + + // Missing Client Id should fail + metric := v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result.a > 0 && result.b && all(result.c, {# < 5}) && result.d == nil", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{ + // URL: server.URL, + Headers: []v1alpha1.WebMetricHeader{{Key: "key", Value: "value"}}, + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: "http://tokenurl", + ClientSecret: "mySecret", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + } + + _, err := NewWebMetricHttpClient(metric) + assert.Error(t, err) + + // Missing Client Secret should fail + metric = v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result.a > 0 && result.b && all(result.c, {# < 5}) && result.d 
== nil", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{ + // URL: server.URL, + Headers: []v1alpha1.WebMetricHeader{{Key: "key", Value: "value"}}, + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: "http://tokenurl", + ClientID: "myClientID", + Scopes: []string{ + "myFirstScope", + "mySecondScope", + }, + }, + }, + }, + }, + } + _, err = NewWebMetricHttpClient(metric) + assert.Error(t, err) + + // Missing Scope should succeed + metric = v1alpha1.Metric{ + Name: "foo", + SuccessCondition: "result.a > 0 && result.b && all(result.c, {# < 5}) && result.d == nil", + Provider: v1alpha1.MetricProvider{ + Web: &v1alpha1.WebMetric{ + // URL: server.URL, + Headers: []v1alpha1.WebMetricHeader{{Key: "key", Value: "value"}}, + Authentication: v1alpha1.Authentication{ + OAuth2: v1alpha1.OAuth2Config{ + TokenURL: "http://tokenurl", + ClientID: "myClientID", + ClientSecret: "mySecret", + }, + }, + }, + }, + } + _, err = NewWebMetricHttpClient(metric) + assert.NoError(t, err) + +} + func newAnalysisRun() *v1alpha1.AnalysisRun { return &v1alpha1.AnalysisRun{} } + +func mockOAuthServer(accessToken string) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.StandardLogger().Infof("Received oauth query") + switch strings.TrimSpace(r.URL.Path) { + case "/ok": + mockOAuthOKResponse(w, r, accessToken) + case "/ko": + mockOAuthKOResponse(w, r) + default: + http.NotFoundHandler().ServeHTTP(w, r) + } + })) +} + +func mockOAuthOKResponse(w http.ResponseWriter, r *http.Request, accessToken string) { + + oAuthResponse := fmt.Sprintf(`{"token_type":"Bearer","expires_in":3599,"access_token":"%s"}`, accessToken) + + sc := http.StatusOK + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(sc) + w.Write([]byte(oAuthResponse)) +} + +func mockOAuthKOResponse(w http.ResponseWriter, r *http.Request) { + sc := http.StatusUnauthorized + w.WriteHeader(sc) +} diff --git 
a/pkg/apiclient/rollout/rollout.pb.go b/pkg/apiclient/rollout/rollout.pb.go index 54e4c099a2..f25c43ced0 100644 --- a/pkg/apiclient/rollout/rollout.pb.go +++ b/pkg/apiclient/rollout/rollout.pb.go @@ -727,6 +727,7 @@ type RolloutInfo struct { AnalysisRuns []*AnalysisRunInfo `protobuf:"bytes,18,rep,name=analysisRuns,proto3" json:"analysisRuns,omitempty"` Containers []*ContainerInfo `protobuf:"bytes,19,rep,name=containers,proto3" json:"containers,omitempty"` Steps []*v1alpha1.CanaryStep `protobuf:"bytes,20,rep,name=steps,proto3" json:"steps,omitempty"` + InitContainers []*ContainerInfo `protobuf:"bytes,21,rep,name=initContainers,proto3" json:"initContainers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -905,6 +906,13 @@ func (m *RolloutInfo) GetSteps() []*v1alpha1.CanaryStep { return nil } +func (m *RolloutInfo) GetInitContainers() []*ContainerInfo { + if m != nil { + return m.InitContainers + } + return nil +} + type ExperimentInfo struct { ObjectMeta *v1.ObjectMeta `protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Icon string `protobuf:"bytes,2,opt,name=icon,proto3" json:"icon,omitempty"` @@ -1017,6 +1025,7 @@ type ReplicaSetInfo struct { Pods []*PodInfo `protobuf:"bytes,14,rep,name=pods,proto3" json:"pods,omitempty"` Ping bool `protobuf:"varint,15,opt,name=ping,proto3" json:"ping,omitempty"` Pong bool `protobuf:"varint,16,opt,name=pong,proto3" json:"pong,omitempty"` + InitContainerImages []string `protobuf:"bytes,17,rep,name=initContainerImages,proto3" json:"initContainerImages,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1167,6 +1176,13 @@ func (m *ReplicaSetInfo) GetPong() bool { return false } +func (m *ReplicaSetInfo) GetInitContainerImages() []string { + if m != nil { + return m.InitContainerImages + } + return nil +} + type PodInfo struct { ObjectMeta *v1.ObjectMeta 
`protobuf:"bytes,1,opt,name=objectMeta,proto3" json:"objectMeta,omitempty"` Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` @@ -1753,121 +1769,123 @@ func init() { } var fileDescriptor_99101d942e8912a7 = []byte{ - // 1821 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1c, 0x49, - 0x15, 0x57, 0x7b, 0x3c, 0xf6, 0xf8, 0x8d, 0xff, 0x96, 0xb3, 0xd9, 0xde, 0xd9, 0x60, 0x79, 0x7b, - 0x91, 0x70, 0x0c, 0x74, 0x3b, 0xde, 0x28, 0xcb, 0xf2, 0xe7, 0x60, 0x12, 0xcb, 0x1b, 0x94, 0xec, - 0x86, 0x36, 0xb0, 0x02, 0x09, 0xa2, 0x72, 0x4f, 0x79, 0xdc, 0x49, 0x4f, 0x57, 0xd3, 0x55, 0x3d, - 0x61, 0x64, 0xcd, 0x01, 0xbe, 0x00, 0x07, 0xbe, 0x02, 0x12, 0xe2, 0x84, 0x90, 0xb8, 0x70, 0xe0, - 0x8a, 0x38, 0x22, 0xf1, 0x05, 0x50, 0x84, 0x90, 0x38, 0x70, 0xe0, 0xc2, 0x19, 0xd5, 0xeb, 0xea, - 0xea, 0x3f, 0x1e, 0x27, 0x8e, 0x6c, 0x36, 0x39, 0x4d, 0xbf, 0xf7, 0xea, 0xbd, 0xf7, 0xab, 0xaa, - 0xf7, 0x5e, 0x55, 0xbd, 0x81, 0xf7, 0x93, 0xa7, 0x03, 0x8f, 0x26, 0x61, 0x10, 0x85, 0x2c, 0x96, - 0x5e, 0xca, 0xa3, 0x88, 0x67, 0xe6, 0xd7, 0x4d, 0x52, 0x2e, 0x39, 0x99, 0xd7, 0x64, 0xef, 0xc6, - 0x80, 0xf3, 0x41, 0xc4, 0x94, 0x82, 0x47, 0xe3, 0x98, 0x4b, 0x2a, 0x43, 0x1e, 0x8b, 0x7c, 0x58, - 0xef, 0xc1, 0x20, 0x94, 0x27, 0xd9, 0x91, 0x1b, 0xf0, 0xa1, 0x47, 0xd3, 0x01, 0x4f, 0x52, 0xfe, - 0x04, 0x3f, 0xbe, 0xaa, 0xf5, 0x85, 0xa7, 0xbd, 0x09, 0xcf, 0x70, 0x46, 0xb7, 0x68, 0x94, 0x9c, - 0xd0, 0x5b, 0xde, 0x80, 0xc5, 0x2c, 0xa5, 0x92, 0xf5, 0xb5, 0xb5, 0xdb, 0x4f, 0xbf, 0x26, 0xdc, - 0x90, 0xab, 0xe1, 0x43, 0x1a, 0x9c, 0x84, 0x31, 0x4b, 0xc7, 0xa5, 0xfe, 0x90, 0x49, 0xea, 0x8d, - 0xce, 0x6a, 0xbd, 0xab, 0x11, 0x22, 0x75, 0x94, 0x1d, 0x7b, 0x6c, 0x98, 0xc8, 0x71, 0x2e, 0x74, - 0xee, 0xc1, 0xaa, 0x9f, 0xfb, 0xbd, 0x1f, 0x1f, 0xf3, 0xef, 0x66, 0x2c, 0x1d, 0x13, 0x02, 0xb3, - 0x31, 0x1d, 0x32, 0xdb, 0xda, 0xb4, 0xb6, 0x16, 0x7c, 0xfc, 0x26, 0x37, 0x60, 0x41, 0xfd, 0x8a, - 0x84, 0x06, 0xcc, 0x9e, 0x41, 0x41, 
0xc9, 0x70, 0x6e, 0xc3, 0xb5, 0x8a, 0x95, 0x07, 0xa1, 0x90, - 0xb9, 0xa5, 0x9a, 0x96, 0xd5, 0xd4, 0xfa, 0xa5, 0x05, 0x2b, 0x87, 0x4c, 0xde, 0x1f, 0xd2, 0x01, - 0xf3, 0xd9, 0x4f, 0x33, 0x26, 0x24, 0xb1, 0xa1, 0x58, 0x59, 0x3d, 0xbe, 0x20, 0x95, 0xad, 0x80, - 0xc7, 0x92, 0xaa, 0x59, 0x17, 0x08, 0x0c, 0x83, 0x5c, 0x83, 0x76, 0xa8, 0xec, 0xd8, 0x2d, 0x94, - 0xe4, 0x04, 0x59, 0x85, 0x96, 0xa4, 0x03, 0x7b, 0x16, 0x79, 0xea, 0xb3, 0x8e, 0xa8, 0xdd, 0x44, - 0x74, 0x02, 0xe4, 0xfb, 0x71, 0x9f, 0xeb, 0xb9, 0xbc, 0x1c, 0x53, 0x0f, 0x3a, 0x29, 0x1b, 0x85, - 0x22, 0xe4, 0x31, 0x42, 0x6a, 0xf9, 0x86, 0xae, 0x7b, 0x6a, 0x35, 0x3d, 0xdd, 0x87, 0xb7, 0x7c, - 0x26, 0x24, 0x4d, 0x65, 0xc3, 0xd9, 0xab, 0x2f, 0xfe, 0x8f, 0xe1, 0xad, 0x47, 0x29, 0x1f, 0x72, - 0xc9, 0x2e, 0x6b, 0x4a, 0x69, 0x1c, 0x67, 0x51, 0x84, 0x70, 0x3b, 0x3e, 0x7e, 0x3b, 0x07, 0xb0, - 0xbe, 0x77, 0xc4, 0xaf, 0x00, 0xe7, 0x01, 0xac, 0xfb, 0x4c, 0xa6, 0xe3, 0x4b, 0x1b, 0x7a, 0x0c, - 0x6b, 0xda, 0xc6, 0x67, 0x54, 0x06, 0x27, 0xfb, 0x23, 0x16, 0xa3, 0x19, 0x39, 0x4e, 0x8c, 0x19, - 0xf5, 0x4d, 0xee, 0x40, 0x37, 0x2d, 0xc3, 0x12, 0x0d, 0x75, 0x77, 0xaf, 0xb9, 0x45, 0x26, 0x57, - 0x42, 0xd6, 0xaf, 0x0e, 0x74, 0x1e, 0xc3, 0xd2, 0x27, 0x85, 0x37, 0xc5, 0x78, 0x71, 0x1c, 0x93, - 0x1d, 0x58, 0xa7, 0x23, 0x1a, 0x46, 0xf4, 0x28, 0x62, 0x46, 0x4f, 0xd8, 0x33, 0x9b, 0xad, 0xad, - 0x05, 0x7f, 0x9a, 0xc8, 0xb9, 0x0b, 0x2b, 0x8d, 0x7c, 0x21, 0x3b, 0xd0, 0x29, 0x0a, 0x80, 0x6d, - 0x6d, 0xb6, 0xce, 0x05, 0x6a, 0x46, 0x39, 0x1f, 0x42, 0xf7, 0x07, 0x2c, 0x55, 0xb1, 0x86, 0x18, - 0xb7, 0x60, 0xa5, 0x10, 0x69, 0xb6, 0x46, 0xda, 0x64, 0x3b, 0xbf, 0x99, 0x83, 0x6e, 0xc5, 0x24, - 0x79, 0x04, 0xc0, 0x8f, 0x9e, 0xb0, 0x40, 0x3e, 0x64, 0x92, 0xa2, 0x52, 0x77, 0x77, 0xc7, 0xcd, - 0x6b, 0x8d, 0x5b, 0xad, 0x35, 0x6e, 0xf2, 0x74, 0xa0, 0x18, 0xc2, 0x55, 0xb5, 0xc6, 0x1d, 0xdd, - 0x72, 0x3f, 0x35, 0x7a, 0x7e, 0xc5, 0x06, 0xb9, 0x0e, 0x73, 0x42, 0x52, 0x99, 0x09, 0xbd, 0x79, - 0x9a, 0x52, 0x99, 0x34, 0x64, 0x42, 0x94, 0x79, 0x5a, 0x90, 0x6a, 0xfb, 
0xc2, 0x80, 0xc7, 0x3a, - 0x55, 0xf1, 0x5b, 0x65, 0x97, 0x90, 0xaa, 0x92, 0x0d, 0xc6, 0x3a, 0x55, 0x0d, 0xad, 0xc6, 0x0b, - 0xc9, 0x12, 0x7b, 0x2e, 0x1f, 0xaf, 0xbe, 0xd5, 0x2e, 0x09, 0x26, 0x3f, 0x63, 0xe1, 0xe0, 0x44, - 0xda, 0xf3, 0xf9, 0x2e, 0x19, 0x06, 0x71, 0x60, 0x91, 0x06, 0x32, 0xa3, 0x91, 0x1e, 0xd0, 0xc1, - 0x01, 0x35, 0x9e, 0xaa, 0x22, 0x29, 0xa3, 0xfd, 0xb1, 0xbd, 0xb0, 0x69, 0x6d, 0xb5, 0xfd, 0x9c, - 0x50, 0xa8, 0x83, 0x2c, 0x4d, 0x59, 0x2c, 0x6d, 0x40, 0x7e, 0x41, 0x2a, 0x49, 0x9f, 0x89, 0x30, - 0x65, 0x7d, 0xbb, 0x9b, 0x4b, 0x34, 0xa9, 0x24, 0x59, 0xd2, 0x57, 0x55, 0xd8, 0x5e, 0xcc, 0x25, - 0x9a, 0x54, 0x28, 0x4d, 0x48, 0xd8, 0x4b, 0x28, 0x2b, 0x19, 0x64, 0x13, 0xba, 0x69, 0x5e, 0x17, - 0x58, 0x7f, 0x4f, 0xda, 0xcb, 0x08, 0xb2, 0xca, 0x22, 0x1b, 0x00, 0xba, 0xc2, 0xab, 0x2d, 0x5e, - 0xc1, 0x01, 0x15, 0x0e, 0xf9, 0x48, 0x59, 0x48, 0xa2, 0x30, 0xa0, 0x87, 0x4c, 0x0a, 0x7b, 0x15, - 0x63, 0xe9, 0xed, 0x32, 0x96, 0x8c, 0x4c, 0xc7, 0x7d, 0x39, 0x56, 0xa9, 0xb2, 0x9f, 0x25, 0x2c, - 0x0d, 0x87, 0x2c, 0x96, 0xc2, 0x5e, 0x6b, 0xa8, 0xee, 0x1b, 0x59, 0xae, 0x5a, 0x19, 0x4b, 0xbe, - 0x09, 0x8b, 0x34, 0xa6, 0xd1, 0x58, 0x84, 0xc2, 0xcf, 0x62, 0x61, 0x13, 0xd4, 0xb5, 0x8d, 0xee, - 0x5e, 0x29, 0x44, 0xe5, 0xda, 0x68, 0x72, 0x07, 0xc0, 0x94, 0x72, 0x61, 0xaf, 0xa3, 0xee, 0x75, - 0xa3, 0x7b, 0xb7, 0x10, 0xa1, 0x66, 0x65, 0x24, 0xf9, 0x09, 0xb4, 0xd5, 0xce, 0x0b, 0xfb, 0x1a, - 0xaa, 0x7c, 0xec, 0x96, 0xc7, 0xad, 0x5b, 0x1c, 0xb7, 0xf8, 0xf1, 0xb8, 0xc8, 0x81, 0x32, 0x84, - 0x0d, 0xa7, 0x38, 0x6e, 0xdd, 0xbb, 0x34, 0xa6, 0xe9, 0xf8, 0x50, 0xb2, 0xc4, 0xcf, 0xcd, 0x3a, - 0x7f, 0x9a, 0x81, 0xe5, 0xfa, 0xac, 0xff, 0x0f, 0xc9, 0x52, 0x84, 0xfe, 0x4c, 0x3d, 0xf4, 0xcd, - 0xc1, 0xd2, 0x6a, 0x1c, 0x2c, 0x65, 0x72, 0xcd, 0x9e, 0x97, 0x5c, 0xed, 0x7a, 0x72, 0x35, 0x42, - 0x62, 0xee, 0x15, 0x42, 0xa2, 0xb9, 0xaf, 0xf3, 0xaf, 0xb2, 0xaf, 0xce, 0x7f, 0x5b, 0xb0, 0x5c, - 0xb7, 0xfe, 0x39, 0x16, 0x9b, 0x62, 0x5d, 0x5b, 0xe7, 0xac, 0xeb, 0xec, 0xd4, 0x75, 0x55, 0x59, - 0xd9, 0xc6, 
0xe3, 0x4f, 0x53, 0x8a, 0x1f, 0x60, 0x64, 0x60, 0xb1, 0xe9, 0xf8, 0x9a, 0x52, 0x7c, - 0x1a, 0xc8, 0x70, 0xc4, 0xb0, 0xd6, 0x74, 0x7c, 0x4d, 0xa9, 0x7d, 0x48, 0x94, 0x51, 0xf6, 0x0c, - 0x6b, 0x4c, 0xc7, 0x2f, 0xc8, 0xdc, 0x3b, 0xae, 0x86, 0xd0, 0x15, 0xc6, 0xd0, 0xf5, 0xb2, 0x00, - 0xcd, 0xb2, 0xd0, 0x83, 0x8e, 0x64, 0xc3, 0x24, 0xa2, 0x92, 0x61, 0xa5, 0x59, 0xf0, 0x0d, 0x4d, - 0xbe, 0x02, 0x6b, 0x22, 0xa0, 0x11, 0xbb, 0xc7, 0x9f, 0xc5, 0xf7, 0x18, 0xed, 0x47, 0x61, 0xcc, - 0xb0, 0xe8, 0x2c, 0xf8, 0x67, 0x05, 0x0a, 0x35, 0xde, 0x8d, 0x84, 0xbd, 0x84, 0xe7, 0x93, 0xa6, - 0xc8, 0x17, 0x61, 0x36, 0xe1, 0x7d, 0x61, 0x2f, 0xe3, 0x06, 0xaf, 0x9a, 0x0d, 0x7e, 0xc4, 0xfb, - 0xb8, 0xb1, 0x28, 0x55, 0x6b, 0x9a, 0x84, 0xf1, 0x00, 0xcb, 0x4e, 0xc7, 0xc7, 0x6f, 0xe4, 0xf1, - 0x78, 0x60, 0xaf, 0x6a, 0x1e, 0x8f, 0x07, 0xce, 0x1f, 0x2d, 0x98, 0xd7, 0x9a, 0xaf, 0x79, 0xc7, - 0x4d, 0x49, 0xcf, 0x93, 0x45, 0x97, 0x74, 0xdc, 0x09, 0xac, 0xa9, 0x02, 0x77, 0x1b, 0x77, 0x22, - 0xa7, 0x9d, 0x8f, 0x60, 0xa9, 0x56, 0x71, 0xa6, 0xde, 0x50, 0xcc, 0x7d, 0x73, 0xa6, 0x72, 0xdf, - 0x74, 0xfe, 0x63, 0xc1, 0xfc, 0x77, 0xf8, 0xd1, 0x1b, 0x30, 0xed, 0x0d, 0x80, 0x21, 0x93, 0x69, - 0x18, 0xa8, 0x5b, 0x87, 0x9e, 0x7b, 0x85, 0x43, 0x3e, 0x86, 0x85, 0xf2, 0x94, 0x69, 0x23, 0xb8, - 0xed, 0x8b, 0x81, 0xfb, 0x5e, 0x38, 0x64, 0x7e, 0xa9, 0xec, 0xfc, 0xd3, 0x02, 0xbb, 0x52, 0x05, - 0x0e, 0x13, 0x16, 0xec, 0xc5, 0xfd, 0xc3, 0x1c, 0x1a, 0x85, 0x59, 0x91, 0xb0, 0x40, 0x4f, 0xff, - 0xe1, 0xe5, 0xea, 0x73, 0xc3, 0x8b, 0x8f, 0xa6, 0xc9, 0xa0, 0xb6, 0x2a, 0xdd, 0xdd, 0x4f, 0xaf, - 0xce, 0x09, 0x9a, 0x2d, 0x96, 0xd9, 0xf9, 0x77, 0x0b, 0x56, 0x1a, 0xe5, 0xee, 0x0d, 0x3e, 0x0d, - 0x36, 0x00, 0x44, 0x16, 0x04, 0x4c, 0x88, 0xe3, 0x2c, 0xd2, 0x31, 0x5e, 0xe1, 0x28, 0xbd, 0x63, - 0x1a, 0x46, 0xac, 0x8f, 0x55, 0xad, 0xed, 0x6b, 0x4a, 0x5d, 0x93, 0xc2, 0x38, 0xe0, 0x71, 0x10, - 0x65, 0xa2, 0xa8, 0x6d, 0x6d, 0xbf, 0xc6, 0x53, 0xc1, 0xcf, 0xd2, 0x94, 0xa7, 0x58, 0xdf, 0xda, - 0x7e, 0x4e, 0xa8, 0x0a, 0xf2, 0x84, 0x1f, 0xa9, 
0xca, 0x56, 0xaf, 0x20, 0x3a, 0x21, 0x7c, 0x94, - 0x92, 0x0f, 0x00, 0x62, 0x1e, 0x6b, 0x9e, 0x0d, 0x38, 0x76, 0xdd, 0x8c, 0xfd, 0xc4, 0x88, 0xfc, - 0xca, 0x30, 0xb2, 0xad, 0x8e, 0x36, 0x15, 0xbb, 0xc2, 0xee, 0x36, 0xac, 0x3f, 0xcc, 0xf9, 0x7e, - 0x31, 0x80, 0x1c, 0xc0, 0x92, 0xa8, 0xc6, 0x20, 0x96, 0xc2, 0xee, 0xee, 0x7b, 0xd3, 0x8e, 0xac, - 0x5a, 0xb0, 0xfa, 0x75, 0x3d, 0xe7, 0xd7, 0x16, 0x40, 0x89, 0x47, 0x4d, 0x7a, 0x44, 0xa3, 0xac, - 0x28, 0x03, 0x39, 0x71, 0x6e, 0x4e, 0xd6, 0xf3, 0xaf, 0xf5, 0xe2, 0xfc, 0x9b, 0xbd, 0x4c, 0xfe, - 0xfd, 0xde, 0x82, 0x79, 0xbd, 0x08, 0x53, 0x2b, 0xd5, 0x36, 0xac, 0xea, 0x6d, 0xbf, 0xcb, 0xe3, - 0x7e, 0x28, 0x43, 0x13, 0x5c, 0x67, 0xf8, 0x6a, 0x8e, 0x01, 0xcf, 0x62, 0x89, 0x80, 0xdb, 0x7e, - 0x4e, 0xa8, 0x03, 0xa6, 0xba, 0xfd, 0x0f, 0xc2, 0x61, 0x98, 0x63, 0x6e, 0xfb, 0x67, 0x05, 0x2a, - 0x80, 0x54, 0x28, 0x65, 0xa9, 0x1e, 0x98, 0x87, 0x5e, 0x8d, 0xb7, 0xfb, 0xaf, 0x25, 0x58, 0xd6, - 0x2f, 0x90, 0x43, 0x96, 0x8e, 0xc2, 0x80, 0x11, 0x01, 0xcb, 0x07, 0x4c, 0x56, 0x9f, 0x25, 0xef, - 0x4c, 0x7b, 0xff, 0x60, 0x5f, 0xa1, 0x37, 0xf5, 0x69, 0xe4, 0xec, 0xfc, 0xe2, 0x6f, 0xff, 0xf8, - 0xd5, 0xcc, 0x36, 0xd9, 0xc2, 0x66, 0xcc, 0xe8, 0x56, 0xd9, 0x51, 0x39, 0x35, 0x8f, 0xb5, 0x49, - 0xfe, 0x3d, 0xf1, 0x42, 0xe5, 0x62, 0x02, 0xab, 0xf8, 0x84, 0xbc, 0x94, 0xdb, 0x3b, 0xe8, 0x76, - 0x87, 0xb8, 0x17, 0x75, 0xeb, 0x3d, 0x53, 0x3e, 0x77, 0x2c, 0x32, 0x82, 0x55, 0xf5, 0xf6, 0xab, - 0x18, 0x13, 0xe4, 0x0b, 0xd3, 0x7c, 0x98, 0x8e, 0x4a, 0xcf, 0x3e, 0x4f, 0xec, 0xdc, 0x44, 0x18, - 0xef, 0x93, 0xf7, 0x5e, 0x08, 0x03, 0xa7, 0xfd, 0x73, 0x0b, 0xd6, 0x9a, 0xf3, 0x7e, 0xa9, 0xe7, - 0x5e, 0x53, 0x5c, 0x3e, 0xbe, 0x1d, 0x0f, 0x7d, 0xdf, 0x24, 0x5f, 0x7a, 0xa9, 0x6f, 0x33, 0xf7, - 0x1f, 0xc2, 0xe2, 0x01, 0x93, 0xe6, 0x4d, 0x4c, 0xae, 0xbb, 0x79, 0x9b, 0xca, 0x2d, 0xda, 0x54, - 0xee, 0xfe, 0x30, 0x91, 0xe3, 0x5e, 0xf9, 0x0c, 0xa8, 0x3d, 0xc9, 0x9d, 0x77, 0xd0, 0xe5, 0x3a, - 0x59, 0x2b, 0x5c, 0x96, 0xef, 0xf1, 0xdf, 0x59, 0xea, 0xd6, 0x59, 0x6d, 0xae, 0x90, 
0x8d, 0xca, - 0x65, 0x77, 0x4a, 0xd7, 0xa5, 0xb7, 0x7f, 0xb9, 0x43, 0x43, 0x5b, 0x2b, 0x42, 0xa1, 0xf7, 0xe5, - 0x8b, 0x84, 0x82, 0xbe, 0x70, 0x7c, 0xdd, 0xda, 0x46, 0xc4, 0xf5, 0x1e, 0x4e, 0x05, 0xf1, 0xd4, - 0xe6, 0xce, 0x6b, 0x41, 0x9c, 0xe4, 0x48, 0x14, 0xe2, 0xdf, 0x5a, 0xb0, 0x58, 0x6d, 0x0b, 0x91, - 0x1b, 0x65, 0x7d, 0x3d, 0xdb, 0x2d, 0xba, 0x2a, 0xb4, 0xb7, 0x11, 0xad, 0xdb, 0xbb, 0x79, 0x11, - 0xb4, 0x54, 0xe1, 0x50, 0x58, 0xff, 0x9c, 0xf7, 0x19, 0x8b, 0xa8, 0xc6, 0xce, 0x60, 0x99, 0x47, - 0x8d, 0x0e, 0xe4, 0x55, 0x41, 0xf5, 0x11, 0xea, 0x83, 0xde, 0xc1, 0x8b, 0xa1, 0x6a, 0xee, 0xc4, - 0x13, 0x4c, 0x7a, 0xa7, 0xe6, 0x69, 0x3b, 0xf1, 0x4e, 0xf1, 0x46, 0xf9, 0xad, 0xed, 0xed, 0x89, - 0x77, 0x2a, 0xe9, 0x60, 0xa2, 0x26, 0xf2, 0x07, 0x0b, 0xba, 0x95, 0xfe, 0x24, 0x79, 0xd7, 0x4c, - 0xe2, 0x6c, 0xd7, 0xf2, 0xaa, 0xe6, 0xb1, 0x87, 0xf3, 0xf8, 0x46, 0xef, 0xce, 0x05, 0xe7, 0x91, - 0xc5, 0x7d, 0xee, 0x9d, 0x16, 0xd7, 0x93, 0x49, 0x11, 0x2b, 0xd5, 0xce, 0x5f, 0x25, 0x56, 0xa6, - 0x34, 0x04, 0x5f, 0x4b, 0xac, 0xa4, 0x0a, 0x87, 0xc2, 0xfa, 0x08, 0xe6, 0x75, 0x9b, 0xec, 0xdc, - 0x8a, 0x54, 0x9e, 0x02, 0x95, 0xf6, 0x9b, 0xf3, 0x36, 0xba, 0x5b, 0x23, 0x2b, 0x85, 0xbb, 0x51, - 0x2e, 0xfc, 0xf6, 0xfe, 0x5f, 0x9e, 0x6f, 0x58, 0x7f, 0x7d, 0xbe, 0x61, 0xfd, 0xfd, 0xf9, 0x86, - 0xf5, 0xa3, 0x0f, 0x2f, 0xfc, 0x87, 0x40, 0xfd, 0xef, 0x87, 0xa3, 0x39, 0x44, 0xf1, 0xc1, 0xff, - 0x02, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x35, 0xff, 0xe4, 0x9e, 0x18, 0x00, 0x00, + // 1848 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x5f, 0x6f, 0x1c, 0x49, + 0x11, 0xd7, 0x78, 0xbd, 0xf6, 0xba, 0xd6, 0x7f, 0xdb, 0x49, 0x6e, 0x6e, 0x2f, 0x58, 0xbe, 0x39, + 0x24, 0x1c, 0x03, 0x33, 0x8e, 0x2f, 0xca, 0x71, 0xfc, 0x93, 0x8c, 0x63, 0xf9, 0x82, 0x92, 0xbb, + 0x30, 0x06, 0x4e, 0x20, 0x41, 0xd4, 0x9e, 0x6d, 0xaf, 0x27, 0x99, 0x9d, 0x1e, 0xa6, 0x7b, 0x36, + 0xac, 0x2c, 0x3f, 0xc0, 0x17, 0xe0, 0x81, 0xaf, 0xc0, 0x03, 0x3c, 0x21, 0x24, 0x5e, 0x78, 
0xe0, + 0x15, 0xf1, 0x88, 0xc4, 0x17, 0x40, 0x11, 0x02, 0xf1, 0xc0, 0x03, 0xdf, 0x00, 0x75, 0x4d, 0x4f, + 0xcf, 0x1f, 0xaf, 0x1d, 0x47, 0x36, 0xe4, 0x9e, 0x76, 0xaa, 0xaa, 0xab, 0xea, 0xd7, 0xdd, 0x55, + 0xd5, 0xdd, 0xb5, 0xf0, 0x5e, 0xf2, 0x7c, 0xe0, 0xd1, 0x24, 0x0c, 0xa2, 0x90, 0xc5, 0xd2, 0x4b, + 0x79, 0x14, 0xf1, 0xcc, 0xfc, 0xba, 0x49, 0xca, 0x25, 0x27, 0xb3, 0x9a, 0xec, 0xdd, 0x1e, 0x70, + 0x3e, 0x88, 0x98, 0x52, 0xf0, 0x68, 0x1c, 0x73, 0x49, 0x65, 0xc8, 0x63, 0x91, 0x0f, 0xeb, 0x3d, + 0x1a, 0x84, 0xf2, 0x38, 0x3b, 0x74, 0x03, 0x3e, 0xf4, 0x68, 0x3a, 0xe0, 0x49, 0xca, 0x9f, 0xe1, + 0xc7, 0x97, 0xb5, 0xbe, 0xf0, 0xb4, 0x37, 0xe1, 0x19, 0xce, 0xe8, 0x2e, 0x8d, 0x92, 0x63, 0x7a, + 0xd7, 0x1b, 0xb0, 0x98, 0xa5, 0x54, 0xb2, 0xbe, 0xb6, 0x76, 0xef, 0xf9, 0x57, 0x84, 0x1b, 0x72, + 0x35, 0x7c, 0x48, 0x83, 0xe3, 0x30, 0x66, 0xe9, 0xb8, 0xd4, 0x1f, 0x32, 0x49, 0xbd, 0xd1, 0x59, + 0xad, 0x77, 0x34, 0x42, 0xa4, 0x0e, 0xb3, 0x23, 0x8f, 0x0d, 0x13, 0x39, 0xce, 0x85, 0xce, 0x03, + 0x58, 0xf6, 0x73, 0xbf, 0x0f, 0xe3, 0x23, 0xfe, 0x9d, 0x8c, 0xa5, 0x63, 0x42, 0x60, 0x3a, 0xa6, + 0x43, 0x66, 0x5b, 0xeb, 0xd6, 0xc6, 0x9c, 0x8f, 0xdf, 0xe4, 0x36, 0xcc, 0xa9, 0x5f, 0x91, 0xd0, + 0x80, 0xd9, 0x53, 0x28, 0x28, 0x19, 0xce, 0x3d, 0xb8, 0x51, 0xb1, 0xf2, 0x28, 0x14, 0x32, 0xb7, + 0x54, 0xd3, 0xb2, 0x9a, 0x5a, 0xbf, 0xb0, 0x60, 0xe9, 0x80, 0xc9, 0x87, 0x43, 0x3a, 0x60, 0x3e, + 0xfb, 0x49, 0xc6, 0x84, 0x24, 0x36, 0x14, 0x2b, 0xab, 0xc7, 0x17, 0xa4, 0xb2, 0x15, 0xf0, 0x58, + 0x52, 0x35, 0xeb, 0x02, 0x81, 0x61, 0x90, 0x1b, 0xd0, 0x0e, 0x95, 0x1d, 0xbb, 0x85, 0x92, 0x9c, + 0x20, 0xcb, 0xd0, 0x92, 0x74, 0x60, 0x4f, 0x23, 0x4f, 0x7d, 0xd6, 0x11, 0xb5, 0x9b, 0x88, 0x8e, + 0x81, 0x7c, 0x2f, 0xee, 0x73, 0x3d, 0x97, 0x57, 0x63, 0xea, 0x41, 0x27, 0x65, 0xa3, 0x50, 0x84, + 0x3c, 0x46, 0x48, 0x2d, 0xdf, 0xd0, 0x75, 0x4f, 0xad, 0xa6, 0xa7, 0x87, 0x70, 0xd3, 0x67, 0x42, + 0xd2, 0x54, 0x36, 0x9c, 0xbd, 0xfe, 0xe2, 0xff, 0x08, 0x6e, 0x3e, 0x49, 0xf9, 0x90, 0x4b, 0x76, + 0x55, 0x53, 0x4a, 0xe3, 0x28, 
0x8b, 0x22, 0x84, 0xdb, 0xf1, 0xf1, 0xdb, 0xd9, 0x87, 0xd5, 0x9d, + 0x43, 0x7e, 0x0d, 0x38, 0xf7, 0x61, 0xd5, 0x67, 0x32, 0x1d, 0x5f, 0xd9, 0xd0, 0x53, 0x58, 0xd1, + 0x36, 0x3e, 0xa5, 0x32, 0x38, 0xde, 0x1b, 0xb1, 0x18, 0xcd, 0xc8, 0x71, 0x62, 0xcc, 0xa8, 0x6f, + 0x72, 0x1f, 0xba, 0x69, 0x19, 0x96, 0x68, 0xa8, 0xbb, 0x7d, 0xc3, 0x2d, 0x32, 0xb9, 0x12, 0xb2, + 0x7e, 0x75, 0xa0, 0xf3, 0x14, 0x16, 0x3e, 0x2e, 0xbc, 0x29, 0xc6, 0xc5, 0x71, 0x4c, 0xb6, 0x60, + 0x95, 0x8e, 0x68, 0x18, 0xd1, 0xc3, 0x88, 0x19, 0x3d, 0x61, 0x4f, 0xad, 0xb7, 0x36, 0xe6, 0xfc, + 0x49, 0x22, 0x67, 0x17, 0x96, 0x1a, 0xf9, 0x42, 0xb6, 0xa0, 0x53, 0x14, 0x00, 0xdb, 0x5a, 0x6f, + 0x9d, 0x0b, 0xd4, 0x8c, 0x72, 0x3e, 0x80, 0xee, 0xf7, 0x59, 0xaa, 0x62, 0x0d, 0x31, 0x6e, 0xc0, + 0x52, 0x21, 0xd2, 0x6c, 0x8d, 0xb4, 0xc9, 0x76, 0xfe, 0x39, 0x03, 0xdd, 0x8a, 0x49, 0xf2, 0x04, + 0x80, 0x1f, 0x3e, 0x63, 0x81, 0x7c, 0xcc, 0x24, 0x45, 0xa5, 0xee, 0xf6, 0x96, 0x9b, 0xd7, 0x1a, + 0xb7, 0x5a, 0x6b, 0xdc, 0xe4, 0xf9, 0x40, 0x31, 0x84, 0xab, 0x6a, 0x8d, 0x3b, 0xba, 0xeb, 0x7e, + 0x62, 0xf4, 0xfc, 0x8a, 0x0d, 0x72, 0x0b, 0x66, 0x84, 0xa4, 0x32, 0x13, 0x7a, 0xf3, 0x34, 0xa5, + 0x32, 0x69, 0xc8, 0x84, 0x28, 0xf3, 0xb4, 0x20, 0xd5, 0xf6, 0x85, 0x01, 0x8f, 0x75, 0xaa, 0xe2, + 0xb7, 0xca, 0x2e, 0x21, 0x55, 0x25, 0x1b, 0x8c, 0x75, 0xaa, 0x1a, 0x5a, 0x8d, 0x17, 0x92, 0x25, + 0xf6, 0x4c, 0x3e, 0x5e, 0x7d, 0xab, 0x5d, 0x12, 0x4c, 0x7e, 0xca, 0xc2, 0xc1, 0xb1, 0xb4, 0x67, + 0xf3, 0x5d, 0x32, 0x0c, 0xe2, 0xc0, 0x3c, 0x0d, 0x64, 0x46, 0x23, 0x3d, 0xa0, 0x83, 0x03, 0x6a, + 0x3c, 0x55, 0x45, 0x52, 0x46, 0xfb, 0x63, 0x7b, 0x6e, 0xdd, 0xda, 0x68, 0xfb, 0x39, 0xa1, 0x50, + 0x07, 0x59, 0x9a, 0xb2, 0x58, 0xda, 0x80, 0xfc, 0x82, 0x54, 0x92, 0x3e, 0x13, 0x61, 0xca, 0xfa, + 0x76, 0x37, 0x97, 0x68, 0x52, 0x49, 0xb2, 0xa4, 0xaf, 0xaa, 0xb0, 0x3d, 0x9f, 0x4b, 0x34, 0xa9, + 0x50, 0x9a, 0x90, 0xb0, 0x17, 0x50, 0x56, 0x32, 0xc8, 0x3a, 0x74, 0xd3, 0xbc, 0x2e, 0xb0, 0xfe, + 0x8e, 0xb4, 0x17, 0x11, 0x64, 0x95, 0x45, 0xd6, 0x00, 0x74, 0x85, 
0x57, 0x5b, 0xbc, 0x84, 0x03, + 0x2a, 0x1c, 0xf2, 0xa1, 0xb2, 0x90, 0x44, 0x61, 0x40, 0x0f, 0x98, 0x14, 0xf6, 0x32, 0xc6, 0xd2, + 0x5b, 0x65, 0x2c, 0x19, 0x99, 0x8e, 0xfb, 0x72, 0xac, 0x52, 0x65, 0x3f, 0x4d, 0x58, 0x1a, 0x0e, + 0x59, 0x2c, 0x85, 0xbd, 0xd2, 0x50, 0xdd, 0x33, 0xb2, 0x5c, 0xb5, 0x32, 0x96, 0x7c, 0x1d, 0xe6, + 0x69, 0x4c, 0xa3, 0xb1, 0x08, 0x85, 0x9f, 0xc5, 0xc2, 0x26, 0xa8, 0x6b, 0x1b, 0xdd, 0x9d, 0x52, + 0x88, 0xca, 0xb5, 0xd1, 0xe4, 0x3e, 0x80, 0x29, 0xe5, 0xc2, 0x5e, 0x45, 0xdd, 0x5b, 0x46, 0x77, + 0xb7, 0x10, 0xa1, 0x66, 0x65, 0x24, 0xf9, 0x31, 0xb4, 0xd5, 0xce, 0x0b, 0xfb, 0x06, 0xaa, 0x7c, + 0xe4, 0x96, 0xc7, 0xad, 0x5b, 0x1c, 0xb7, 0xf8, 0xf1, 0xb4, 0xc8, 0x81, 0x32, 0x84, 0x0d, 0xa7, + 0x38, 0x6e, 0xdd, 0x5d, 0x1a, 0xd3, 0x74, 0x7c, 0x20, 0x59, 0xe2, 0xe7, 0x66, 0xc9, 0x37, 0x61, + 0x31, 0x8c, 0x43, 0xb9, 0x5b, 0x62, 0xbb, 0x79, 0x21, 0xb6, 0xc6, 0x68, 0xe7, 0x8f, 0x53, 0xb0, + 0x58, 0x5f, 0xb5, 0xff, 0x41, 0xb2, 0x15, 0xa9, 0x33, 0x55, 0x4f, 0x1d, 0x73, 0x30, 0xb5, 0x1a, + 0x07, 0x53, 0x99, 0x9c, 0xd3, 0xe7, 0x25, 0x67, 0xbb, 0x9e, 0x9c, 0x8d, 0x90, 0x9a, 0x79, 0x8d, + 0x90, 0x6a, 0xc6, 0xc5, 0xec, 0xeb, 0xc4, 0x85, 0xf3, 0xeb, 0x69, 0x58, 0xac, 0x5b, 0xff, 0x3f, + 0x16, 0xab, 0x62, 0x5d, 0x5b, 0xe7, 0xac, 0xeb, 0xf4, 0xc4, 0x75, 0x55, 0x59, 0xdd, 0xc6, 0xe3, + 0x53, 0x53, 0x8a, 0x1f, 0x60, 0x64, 0x61, 0xb1, 0xea, 0xf8, 0x9a, 0x52, 0x7c, 0x1a, 0xc8, 0x70, + 0xc4, 0xb0, 0x56, 0x75, 0x7c, 0x4d, 0xa9, 0x7d, 0x48, 0x94, 0x51, 0xf6, 0x02, 0x6b, 0x54, 0xc7, + 0x2f, 0xc8, 0xdc, 0x3b, 0xae, 0x86, 0xd0, 0x15, 0xca, 0xd0, 0xf5, 0xb2, 0x02, 0xcd, 0xb2, 0xd2, + 0x83, 0x8e, 0x64, 0xc3, 0x24, 0xa2, 0x92, 0x61, 0xa5, 0x9a, 0xf3, 0x0d, 0x4d, 0xbe, 0x04, 0x2b, + 0x22, 0xa0, 0x11, 0x7b, 0xc0, 0x5f, 0xc4, 0x0f, 0x18, 0xed, 0x47, 0x61, 0xcc, 0xb0, 0x68, 0xcd, + 0xf9, 0x67, 0x05, 0x0a, 0x35, 0xde, 0xad, 0x84, 0xbd, 0x80, 0xe7, 0x9b, 0xa6, 0xc8, 0xe7, 0x61, + 0x3a, 0xe1, 0x7d, 0x61, 0x2f, 0xe2, 0x06, 0x2f, 0x9b, 0x0d, 0x7e, 0xc2, 0xfb, 0xb8, 0xb1, 0x28, + 0x55, 
0x6b, 0x9a, 0x84, 0xf1, 0x00, 0xcb, 0x56, 0xc7, 0xc7, 0x6f, 0xe4, 0xf1, 0x78, 0x60, 0x2f, + 0x6b, 0x1e, 0x8f, 0x07, 0xea, 0x48, 0xad, 0xa5, 0xd2, 0xc3, 0xdc, 0xe5, 0x4a, 0x7e, 0xa4, 0x4e, + 0x10, 0x39, 0x7f, 0xb0, 0x60, 0x56, 0xfb, 0x7a, 0xc3, 0x31, 0x62, 0x0e, 0x91, 0x3c, 0xbd, 0xf4, + 0x21, 0x82, 0x7b, 0x87, 0x55, 0x5c, 0x60, 0x7c, 0xe0, 0xde, 0xe5, 0xb4, 0xf3, 0x21, 0x2c, 0xd4, + 0xea, 0xc8, 0xc4, 0x3b, 0x91, 0xb9, 0xe1, 0x4e, 0x55, 0x6e, 0xb8, 0xce, 0x7f, 0x2c, 0x98, 0xfd, + 0x36, 0x3f, 0xfc, 0x0c, 0x4c, 0x7b, 0x0d, 0x60, 0xc8, 0x64, 0x1a, 0x06, 0xea, 0x9e, 0xa3, 0xe7, + 0x5e, 0xe1, 0x90, 0x8f, 0x60, 0xae, 0x3c, 0xd7, 0xda, 0x08, 0x6e, 0xf3, 0x72, 0xe0, 0xbe, 0x1b, + 0x0e, 0x99, 0x5f, 0x2a, 0x3b, 0xff, 0xb0, 0xc0, 0xae, 0xd4, 0x8d, 0x83, 0x84, 0x05, 0x3b, 0x71, + 0xff, 0x20, 0x87, 0x46, 0x61, 0x5a, 0x24, 0x2c, 0xd0, 0xd3, 0x7f, 0x7c, 0xb5, 0x13, 0xa1, 0xe1, + 0xc5, 0x47, 0xd3, 0x64, 0x50, 0x5b, 0x95, 0xee, 0xf6, 0x27, 0xd7, 0xe7, 0x04, 0xcd, 0x16, 0xcb, + 0xec, 0xfc, 0xbb, 0x05, 0x4b, 0x8d, 0x02, 0xf9, 0x19, 0x3e, 0x3f, 0xd6, 0x00, 0x44, 0x16, 0x04, + 0x4c, 0x88, 0xa3, 0x2c, 0xd2, 0x31, 0x5e, 0xe1, 0x28, 0xbd, 0x23, 0x1a, 0x46, 0xac, 0x8f, 0x75, + 0xb0, 0xed, 0x6b, 0x4a, 0x5d, 0xcc, 0xc2, 0x38, 0xe0, 0x71, 0x10, 0x65, 0xa2, 0xa8, 0x86, 0x6d, + 0xbf, 0xc6, 0x53, 0xc1, 0xcf, 0xd2, 0x94, 0xa7, 0x58, 0x11, 0xdb, 0x7e, 0x4e, 0xa8, 0x9a, 0xf3, + 0x8c, 0x1f, 0xaa, 0x5a, 0x58, 0xaf, 0x39, 0x3a, 0x21, 0x7c, 0x94, 0x92, 0xf7, 0x01, 0x62, 0x1e, + 0x6b, 0x9e, 0x0d, 0x38, 0x76, 0xd5, 0x8c, 0xfd, 0xd8, 0x88, 0xfc, 0xca, 0x30, 0xb2, 0xa9, 0x0e, + 0x43, 0x15, 0xbb, 0xc2, 0xee, 0x36, 0xac, 0x3f, 0xce, 0xf9, 0x7e, 0x31, 0x80, 0xec, 0xc3, 0x82, + 0xa8, 0xc6, 0x20, 0x16, 0xcf, 0xee, 0xf6, 0xbb, 0x93, 0x0e, 0xb9, 0x5a, 0xb0, 0xfa, 0x75, 0x3d, + 0xe7, 0x57, 0x16, 0x40, 0x89, 0x47, 0x4d, 0x7a, 0x44, 0xa3, 0xac, 0x28, 0x03, 0x39, 0x71, 0x6e, + 0x4e, 0xd6, 0xf3, 0xaf, 0x75, 0x71, 0xfe, 0x4d, 0x5f, 0x25, 0xff, 0x7e, 0x67, 0xc1, 0xac, 0x5e, + 0x84, 0x89, 0x95, 0x6a, 0x13, 0x96, 0xf5, 
0xb6, 0xef, 0xf2, 0xb8, 0x1f, 0xca, 0xd0, 0x04, 0xd7, + 0x19, 0xbe, 0x9a, 0x63, 0xc0, 0xb3, 0x58, 0x22, 0xe0, 0xb6, 0x9f, 0x13, 0xea, 0x48, 0xaa, 0x6e, + 0xff, 0xa3, 0x70, 0x18, 0xe6, 0x98, 0xdb, 0xfe, 0x59, 0x81, 0x0a, 0x20, 0x15, 0x4a, 0x59, 0xaa, + 0x07, 0xe6, 0xa1, 0x57, 0xe3, 0x6d, 0xff, 0x6b, 0x01, 0x16, 0xf5, 0x9b, 0xe7, 0x80, 0xa5, 0xa3, + 0x30, 0x60, 0x44, 0xc0, 0xe2, 0x3e, 0x93, 0xd5, 0x87, 0xd0, 0xdb, 0x93, 0x5e, 0x5c, 0xd8, 0xc9, + 0xe8, 0x4d, 0x7c, 0x8c, 0x39, 0x5b, 0x3f, 0xff, 0xeb, 0xdf, 0x7f, 0x39, 0xb5, 0x49, 0x36, 0xb0, + 0xfd, 0x33, 0xba, 0x5b, 0xf6, 0x70, 0x4e, 0xcc, 0xf3, 0xf0, 0x34, 0xff, 0x3e, 0xf5, 0x42, 0xe5, + 0xe2, 0x14, 0x96, 0xf1, 0xd1, 0x7a, 0x25, 0xb7, 0xf7, 0xd1, 0xed, 0x16, 0x71, 0x2f, 0xeb, 0xd6, + 0x7b, 0xa1, 0x7c, 0x6e, 0x59, 0x64, 0x04, 0xcb, 0xea, 0xb5, 0x59, 0x31, 0x26, 0xc8, 0xe7, 0x26, + 0xf9, 0x30, 0x3d, 0x9c, 0x9e, 0x7d, 0x9e, 0xd8, 0xb9, 0x83, 0x30, 0xde, 0x23, 0xef, 0x5e, 0x08, + 0x03, 0xa7, 0xfd, 0x33, 0x0b, 0x56, 0x9a, 0xf3, 0x7e, 0xa5, 0xe7, 0x5e, 0x53, 0x5c, 0x3e, 0xf7, + 0x1d, 0x0f, 0x7d, 0xdf, 0x21, 0x5f, 0x78, 0xa5, 0x6f, 0x33, 0xf7, 0x1f, 0xc0, 0xfc, 0x3e, 0x93, + 0xe6, 0x15, 0x4e, 0x6e, 0xb9, 0x79, 0x63, 0xcc, 0x2d, 0x1a, 0x63, 0xee, 0xde, 0x30, 0x91, 0xe3, + 0x5e, 0x79, 0xb9, 0xaf, 0x35, 0x01, 0x9c, 0xb7, 0xd1, 0xe5, 0x2a, 0x59, 0x29, 0x5c, 0x96, 0x1d, + 0x80, 0xdf, 0x5a, 0xea, 0x9e, 0x5a, 0x6d, 0xe7, 0x90, 0xb5, 0xca, 0xf5, 0x78, 0x42, 0x9f, 0xa7, + 0xb7, 0x77, 0xb5, 0x43, 0x43, 0x5b, 0x2b, 0x42, 0xa1, 0xf7, 0xc5, 0xcb, 0x84, 0x82, 0xbe, 0x70, + 0x7c, 0xd5, 0xda, 0x44, 0xc4, 0xf5, 0xae, 0x51, 0x05, 0xf1, 0xc4, 0x76, 0xd2, 0x1b, 0x41, 0x9c, + 0xe4, 0x48, 0x14, 0xe2, 0xdf, 0x58, 0x30, 0x5f, 0x6d, 0x44, 0x91, 0xdb, 0x65, 0x7d, 0x3d, 0xdb, + 0x9f, 0xba, 0x2e, 0xb4, 0xf7, 0x10, 0xad, 0xdb, 0xbb, 0x73, 0x19, 0xb4, 0x54, 0xe1, 0x50, 0x58, + 0xff, 0x94, 0x77, 0x36, 0x8b, 0xa8, 0xc6, 0x5e, 0x64, 0x99, 0x47, 0x8d, 0x9e, 0xe7, 0x75, 0x41, + 0xf5, 0x11, 0xea, 0xa3, 0xde, 0xfe, 0xc5, 0x50, 0x35, 0xf7, 0xd4, 0x13, 0x4c, 
0x7a, 0x27, 0xe6, + 0x31, 0x7d, 0xea, 0x9d, 0xe0, 0x8d, 0xf2, 0x1b, 0x9b, 0x9b, 0xa7, 0xde, 0x89, 0xa4, 0x83, 0x53, + 0x35, 0x91, 0xdf, 0x5b, 0xd0, 0xad, 0x74, 0x44, 0xc9, 0x3b, 0x66, 0x12, 0x67, 0xfb, 0xa4, 0xd7, + 0x35, 0x8f, 0x1d, 0x9c, 0xc7, 0xd7, 0x7a, 0xf7, 0x2f, 0x39, 0x8f, 0x2c, 0xee, 0x73, 0xef, 0xa4, + 0xb8, 0x9e, 0x9c, 0x16, 0xb1, 0x52, 0xed, 0x35, 0x56, 0x62, 0x65, 0x42, 0x0b, 0xf2, 0x8d, 0xc4, + 0x4a, 0xaa, 0x70, 0x28, 0xac, 0x4f, 0x60, 0x56, 0x37, 0xe6, 0xce, 0xad, 0x48, 0xe5, 0x29, 0x50, + 0x69, 0xf8, 0x39, 0x6f, 0xa1, 0xbb, 0x15, 0xb2, 0x54, 0xb8, 0x1b, 0xe5, 0xc2, 0x6f, 0xed, 0xfd, + 0xf9, 0xe5, 0x9a, 0xf5, 0x97, 0x97, 0x6b, 0xd6, 0xdf, 0x5e, 0xae, 0x59, 0x3f, 0xfc, 0xe0, 0xd2, + 0x7f, 0x41, 0xd4, 0xff, 0xf0, 0x38, 0x9c, 0x41, 0x14, 0xef, 0xff, 0x37, 0x00, 0x00, 0xff, 0xff, + 0x6c, 0x69, 0x4c, 0x07, 0x10, 0x19, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2946,6 +2964,22 @@ func (m *RolloutInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRollout(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } if len(m.Steps) > 0 { for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- { { @@ -3246,6 +3280,17 @@ func (m *ReplicaSetInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.InitContainerImages) > 0 { + for iNdEx := len(m.InitContainerImages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.InitContainerImages[iNdEx]) + copy(dAtA[i:], m.InitContainerImages[iNdEx]) + i = encodeVarintRollout(dAtA, i, uint64(len(m.InitContainerImages[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } if m.Pong { i-- if 
m.Pong { @@ -4218,6 +4263,12 @@ func (m *RolloutInfo) Size() (n int) { n += 2 + l + sovRollout(uint64(l)) } } + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovRollout(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -4332,6 +4383,12 @@ func (m *ReplicaSetInfo) Size() (n int) { if m.Pong { n += 3 } + if len(m.InitContainerImages) > 0 { + for _, s := range m.InitContainerImages { + l = len(s) + n += 2 + l + sovRollout(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6608,6 +6665,40 @@ func (m *RolloutInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, &ContainerInfo{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRollout(dAtA[iNdEx:]) @@ -7336,6 +7427,38 @@ func (m *ReplicaSetInfo) Unmarshal(dAtA []byte) error { } } m.Pong = bool(v != 0) + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerImages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRollout + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRollout + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRollout + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerImages = append(m.InitContainerImages, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRollout(dAtA[iNdEx:]) diff --git a/pkg/apiclient/rollout/rollout.proto b/pkg/apiclient/rollout/rollout.proto index 7949baddd4..53cfbc8ab2 100644 --- a/pkg/apiclient/rollout/rollout.proto +++ b/pkg/apiclient/rollout/rollout.proto @@ -96,6 +96,8 @@ message RolloutInfo { repeated ContainerInfo containers = 19; repeated github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CanaryStep steps = 20; + + repeated ContainerInfo initContainers = 21; } message ExperimentInfo { @@ -125,6 +127,7 @@ message ReplicaSetInfo { repeated PodInfo pods = 14; bool ping = 15; bool pong = 16; + repeated string initContainerImages = 17; } message PodInfo { diff --git a/pkg/apiclient/rollout/rollout.swagger.json b/pkg/apiclient/rollout/rollout.swagger.json index 490c5bcab8..3a74f48a24 100755 --- a/pkg/apiclient/rollout/rollout.swagger.json +++ b/pkg/apiclient/rollout/rollout.swagger.json @@ -632,6 +632,10 @@ "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.MeasurementRetention" }, "title": "MeasurementRetention object contains the settings for retaining the number of measurements during the analysis\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional" + }, + "ttlStrategy": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TTLStrategy", + "title": "TTLStrategy object contains the strategy for the time to live depending on if the analysis succeeded or failed\n+optional" } }, "title": "AnalysisRunSpec is the spec for a AnalysisRun resource" @@ -665,6 +669,10 @@ "dryRunSummary": { 
"$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RunSummary", "title": "DryRunSummary contains the final results from the metric executions in the dry-run mode" + }, + "completedAt": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.Time", + "title": "CompletedAt indicates when the analysisRun completed" } }, "title": "AnalysisRunStatus is the status for a AnalysisRun resource" @@ -685,6 +693,19 @@ }, "title": "AnalysisRunStrategy configuration for the analysis runs and experiments to retain" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateRef": { + "type": "object", + "properties": { + "templateName": { + "type": "string", + "title": "TemplateName name of template to use in AnalysisRun\n+optional" + }, + "clusterScope": { + "type": "boolean", + "title": "Whether to look for the templateName at cluster scope or namespace scope\n+optional" + } + } + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AntiAffinity": { "type": "object", "properties": { @@ -813,6 +834,20 @@ }, "title": "ArgumentValueFrom defines references to fields within resources to grab for the value (i.e. 
Pod Template Hash)" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Authentication": { + "type": "object", + "properties": { + "sigv4": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Sigv4Config", + "title": "Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus\n+optional" + }, + "oauth2": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.OAuth2Config", + "title": "OAuth2 config\n+optional" + } + }, + "title": "Authentication method" + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AwsResourceRef": { "type": "object", "properties": { @@ -1147,14 +1182,30 @@ "type": "object", "properties": { "interval": { - "type": "string" + "type": "string", + "description": "+kubebuilder:default=\"5m\"\nInterval refers to the Interval time window in Datadog (default: 5m). Not to be confused with the polling rate for the metric." }, "query": { "type": "string" }, + "queries": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Queries is a map of query_name_as_key: query. You can then use query_name_as_key inside Formula.Used for v2\n+kubebuilder:validation:Type=object" + }, + "formula": { + "type": "string", + "title": "Formula refers to the Formula made up of the queries. Only useful with Queries. Used for v2" + }, "apiVersion": { "type": "string", - "description": "ApiVersion refers to the Datadog API version being used (default: v1). v1 will eventually be deprecated." + "title": "ApiVersion refers to the Datadog API version being used (default: v1). v1 will eventually be deprecated.\n+kubebuilder:validation:Enum=v1;v2\n+kubebuilder:default=v1" + }, + "aggregator": { + "type": "string", + "title": "+kubebuilder:validation:Enum=avg;min;max;sum;last;percentile;mean;l2norm;area\nAggregator is a type of aggregator to use for metrics-based queries (default: \"\"). 
Used for v2" } } }, @@ -1631,6 +1682,30 @@ }, "title": "NginxTrafficRouting configuration for Nginx ingress controller to control traffic routing" }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.OAuth2Config": { + "type": "object", + "properties": { + "tokenUrl": { + "type": "string", + "title": "OAuth2 provider token URL" + }, + "clientId": { + "type": "string", + "title": "OAuth2 client ID" + }, + "clientSecret": { + "type": "string", + "title": "OAuth2 client secret" + }, + "scopes": { + "type": "array", + "items": { + "type": "string" + }, + "title": "OAuth2 scopes\n+optional" + } + } + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ObjectRef": { "type": "object", "properties": { @@ -1645,6 +1720,10 @@ "name": { "type": "string", "title": "Name of the referent" + }, + "scaleDown": { + "type": "string", + "title": "Automatically scale down deployment" } }, "title": "ObjectRef holds a references to the Kubernetes object" @@ -1706,16 +1785,6 @@ }, "title": "PreferredDuringSchedulingIgnoredDuringExecution defines the weight of the anti-affinity injection" }, - "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusAuth": { - "type": "object", - "properties": { - "sigv4": { - "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Sigv4Config", - "title": "+optional" - } - }, - "title": "PrometheusMetric defines the prometheus query to perform canary analysis" - }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusMetric": { "type": "object", "properties": { @@ -1728,8 +1797,8 @@ "title": "Query is a raw prometheus query to perform" }, "authentication": { - "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusAuth", - "title": "Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus\n+optional" + "$ref": 
"#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Authentication", + "title": "Authentication details\n+optional" }, "timeout": { "type": "string", @@ -1784,7 +1853,7 @@ "templates": { "type": "array", "items": { - "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutAnalysisTemplate" + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateRef" }, "title": "Templates reference to a list of analysis templates to combine for an AnalysisRun\n+patchMergeKey=templateName\n+patchStrategy=merge" }, @@ -1844,19 +1913,6 @@ } } }, - "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutAnalysisTemplate": { - "type": "object", - "properties": { - "templateName": { - "type": "string", - "title": "TemplateName name of template to use in AnalysisRun\n+optional" - }, - "clusterScope": { - "type": "boolean", - "title": "Whether to look for the templateName at cluster scope or namespace scope\n+optional" - } - } - }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutCondition": { "type": "object", "properties": { @@ -1907,6 +1963,17 @@ "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutExperimentStepAnalysisTemplateRef" }, "title": "Analyses reference which analysis templates to run with the experiment\n+patchMergeKey=name\n+patchStrategy=merge" + }, + "dryRun": { + "type": "array", + "items": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun" + }, + "title": "DryRun object contains the settings for running the analysis in Dry-Run mode\n+patchMergeKey=metricName\n+patchStrategy=merge\n+optional" + }, + "analysisRunMetadata": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunMetadata", + "title": "AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns\n+optional" } }, "title": 
"RolloutExperimentStep defines a template that is used to create a experiment for a step" @@ -2236,6 +2303,11 @@ "format": "byte" }, "title": "+kubebuilder:validation:Schemaless\n+kubebuilder:pruning:PreserveUnknownFields\n+kubebuilder:validation:Type=object\nPlugins holds specific configuration that traffic router plugins can use for routing traffic" + }, + "maxTrafficWeight": { + "type": "integer", + "format": "int32", + "title": "MaxTrafficWeight The total weight of traffic. If unspecified, it defaults to 100" } }, "title": "RolloutTrafficRouting hosts all the different configuration for supported service meshes to enable more fine-grained traffic routing" @@ -2486,6 +2558,27 @@ }, "description": "TLSRoute holds the information on the virtual service's TLS/HTTPS routes that are desired to be matched for changing weights." }, + "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TTLStrategy": { + "type": "object", + "properties": { + "secondsAfterCompletion": { + "type": "integer", + "format": "int32", + "description": "SecondsAfterCompletion is the number of seconds to live after completion." + }, + "secondsAfterFailure": { + "type": "integer", + "format": "int32", + "description": "SecondsAfterFailure is the number of seconds to live after failure." + }, + "secondsAfterSuccess": { + "type": "integer", + "format": "int32", + "description": "SecondsAfterSuccess is the number of seconds to live after success." 
+ } + }, + "title": "TTLStrategy defines the strategy for the time to live depending on if the analysis succeeded or failed" + }, "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateService": { "type": "object", "properties": { @@ -2596,6 +2689,10 @@ "type": "string", "format": "byte", "title": "+kubebuilder:validation:Schemaless\n+kubebuilder:pruning:PreserveUnknownFields\n+kubebuilder:validation:Type=object\nJSONBody is the body of the web metric in a json format (method must be POST/PUT)" + }, + "authentication": { + "$ref": "#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Authentication", + "title": "Authentication details\n+optional" } } }, @@ -2697,7 +2794,7 @@ "completions": { "type": "integer", "format": "int32", - "title": "Specifies the desired number of successfully finished pods the\njob should be run with. Setting to nil means that the success of any\npod signals the success of all pods, and allows parallelism to have any positive\nvalue. Setting to 1 means that parallelism is limited to 1 and the success of that\npod signals the success of the job.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/\n+optional" + "title": "Specifies the desired number of successfully finished pods the\njob should be run with. Setting to null means that the success of any\npod signals the success of all pods, and allows parallelism to have any positive\nvalue. Setting to 1 means that parallelism is limited to 1 and the success of that\npod signals the success of the job.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/\n+optional" }, "activeDeadlineSeconds": { "type": "string", @@ -2706,13 +2803,23 @@ }, "podFailurePolicy": { "$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicy", - "description": "Specifies the policy of handling failed pods. 
In particular, it allows to\nspecify the set of actions and conditions which need to be\nsatisfied to take the associated action.\nIf empty, the default behaviour applies - the counter of failed pods,\nrepresented by the jobs's .status.failed field, is incremented and it is\nchecked against the backoffLimit. This field cannot be used in combination\nwith restartPolicy=OnFailure.\n\nThis field is alpha-level. To use this field, you must enable the\n`JobPodFailurePolicy` feature gate (disabled by default).\n+optional" + "description": "Specifies the policy of handling failed pods. In particular, it allows to\nspecify the set of actions and conditions which need to be\nsatisfied to take the associated action.\nIf empty, the default behaviour applies - the counter of failed pods,\nrepresented by the jobs's .status.failed field, is incremented and it is\nchecked against the backoffLimit. This field cannot be used in combination\nwith restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy`\nfeature gate is enabled (enabled by default).\n+optional" }, "backoffLimit": { "type": "integer", "format": "int32", "title": "Specifies the number of retries before marking this job failed.\nDefaults to 6\n+optional" }, + "backoffLimitPerIndex": { + "type": "integer", + "format": "int32", + "title": "Specifies the limit for the number of retries within an\nindex before marking this index as failed. When enabled the number of\nfailures per index is kept in the pod's\nbatch.kubernetes.io/job-index-failure-count annotation. It can only\nbe set when Job's completionMode=Indexed, and the Pod's restart\npolicy is Never. The field is immutable.\nThis field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex`\nfeature gate is enabled (enabled by default).\n+optional" + }, + "maxFailedIndexes": { + "type": "integer", + "format": "int32", + "title": "Specifies the maximal number of failed indexes before marking the Job as\nfailed, when backoffLimitPerIndex is set. Once the number of failed\nindexes exceeds this number the entire Job is marked as Failed and its\nexecution is terminated. When left as null the job continues execution of\nall of its indexes and is marked with the `Complete` Job condition.\nIt can only be specified when backoffLimitPerIndex is set.\nIt can be null or up to completions. It is required and must be\nless than or equal to 10^4 when is completions greater than 10^5.\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex`\nfeature gate is enabled (enabled by default).\n+optional" + }, "selector": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", "title": "A label query over pods that should match the pod count.\nNormally, the system sets this field for you.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors\n+optional" @@ -2723,7 +2830,7 @@ }, "template": { "$ref": "#/definitions/k8s.io.api.core.v1.PodTemplateSpec", - "title": "Describes the pod that will be created when executing a job.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + "title": "Describes the pod that will be created when executing a job.\nThe only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\".\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" }, "ttlSecondsAfterFinished": { "type": "integer", @@ -2732,11 +2839,15 @@ }, "completionMode": { "type": "string", - "description": "CompletionMode specifies how Pod completions are tracked. 
It can be\n`NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have\nbeen .spec.completions successfully completed Pods. Each Pod completion is\nhomologous to each other.\n\n`Indexed` means that the Pods of a\nJob get an associated completion index from 0 to (.spec.completions - 1),\navailable in the annotation batch.kubernetes.io/job-completion-index.\nThe Job is considered complete when there is one successfully completed Pod\nfor each index.\nWhen value is `Indexed`, .spec.completions must be specified and\n`.spec.parallelism` must be less than or equal to 10^5.\nIn addition, The Pod name takes the form\n`$(job-name)-$(index)-$(random-string)`,\nthe Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future.\nIf the Job controller observes a mode that it doesn't recognize, which\nis possible during upgrades due to version skew, the controller\nskips updates for the Job.\n+optional" + "description": "completionMode specifies how Pod completions are tracked. It can be\n`NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have\nbeen .spec.completions successfully completed Pods. 
Each Pod completion is\nhomologous to each other.\n\n`Indexed` means that the Pods of a\nJob get an associated completion index from 0 to (.spec.completions - 1),\navailable in the annotation batch.kubernetes.io/job-completion-index.\nThe Job is considered complete when there is one successfully completed Pod\nfor each index.\nWhen value is `Indexed`, .spec.completions must be specified and\n`.spec.parallelism` must be less than or equal to 10^5.\nIn addition, The Pod name takes the form\n`$(job-name)-$(index)-$(random-string)`,\nthe Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future.\nIf the Job controller observes a mode that it doesn't recognize, which\nis possible during upgrades due to version skew, the controller\nskips updates for the Job.\n+optional" }, "suspend": { "type": "boolean", - "description": "Suspend specifies whether the Job controller should create Pods or not. If\na Job is created with suspend set to true, no Pods are created by the Job\ncontroller. If a Job is suspended after creation (i.e. the flag goes from\nfalse to true), the Job controller will delete all active Pods associated\nwith this Job. Users must design their workload to gracefully handle this.\nSuspending a Job will reset the StartTime field of the Job, effectively\nresetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\n+optional" + "description": "suspend specifies whether the Job controller should create Pods or not. If\na Job is created with suspend set to true, no Pods are created by the Job\ncontroller. If a Job is suspended after creation (i.e. the flag goes from\nfalse to true), the Job controller will delete all active Pods associated\nwith this Job. Users must design their workload to gracefully handle this.\nSuspending a Job will reset the StartTime field of the Job, effectively\nresetting the ActiveDeadlineSeconds timer too. 
Defaults to false.\n\n+optional" + }, + "podReplacementPolicy": { + "type": "string", + "description": "podReplacementPolicy specifies when to create replacement Pods.\nPossible values are:\n- TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value.\nTerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.\nThis is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.\nThis is on by default.\n+optional" } }, "description": "JobSpec describes how the job execution will look like." @@ -2763,7 +2874,8 @@ }, "operator": { "type": "string", - "description": "Represents the relationship between the container exit code(s) and the\nspecified values. Containers completed with success (exit code 0) are\nexcluded from the requirement check. Possible values are:\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown operator by assuming the requirement is not satisfied." 
+ "description": "- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown operator by assuming the requirement is not satisfied.", + "title": "Represents the relationship between the container exit code(s) and the\nspecified values. Containers completed with success (exit code 0) are\nexcluded from the requirement check. Possible values are:" }, "values": { "type": "array", @@ -2795,7 +2907,8 @@ "properties": { "action": { "type": "string", - "description": "Specifies the action taken on a pod failure when the requirements are satisfied.\nPossible values are:\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown action by skipping the rule." + "description": "- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. 
It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown action by skipping the rule.", + "title": "Specifies the action taken on a pod failure when the requirements are satisfied.\nPossible values are:" }, "onExitCodes": { "$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement", @@ -2806,10 +2919,10 @@ "items": { "$ref": "#/definitions/k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern" }, - "title": "Represents the requirement on the pod conditions. The requirement is represented\nas a list of pod condition patterns. The requirement is satisfied if at\nleast one pattern matches an actual pod condition. At most 20 elements are allowed.\n+listType=atomic" + "title": "Represents the requirement on the pod conditions. The requirement is represented\nas a list of pod condition patterns. The requirement is satisfied if at\nleast one pattern matches an actual pod condition. At most 20 elements are allowed.\n+listType=atomic\n+optional" } }, - "description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.\nOne of OnExitCodes and onPodConditions, but not both, can be used in each rule." + "description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.\nOne of onExitCodes and onPodConditions, but not both, can be used in each rule." 
}, "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource": { "type": "object", @@ -3004,6 +3117,46 @@ }, "description": "Represents a cinder volume resource in Openstack.\nA Cinder volume must exist before mounting to a container.\nThe volume must also be in the same region as the kubelet.\nCinder volumes support ownership management and SELinux relabeling." }, + "k8s.io.api.core.v1.ClaimSource": { + "type": "object", + "properties": { + "resourceClaimName": { + "type": "string", + "description": "ResourceClaimName is the name of a ResourceClaim object in the same\nnamespace as this pod." + }, + "resourceClaimTemplateName": { + "type": "string", + "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate\nobject in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will\nbe bound to this pod. When this pod is deleted, the ResourceClaim\nwill also be deleted. The pod name and resource name, along with a\ngenerated component, will be used to form a unique name for the\nResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the\ncorresponding ResourceClaim by the control plane after creating the\nResourceClaim." + } + }, + "description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must\ntreat an empty object as if it has an unknown value." + }, + "k8s.io.api.core.v1.ClusterTrustBundleProjection": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Select a single ClusterTrustBundle by object name. Mutually-exclusive\nwith signerName and labelSelector.\n+optional" + }, + "signerName": { + "type": "string", + "title": "Select all ClusterTrustBundles that match this signer name.\nMutually-exclusive with name. 
The contents of all selected\nClusterTrustBundles will be unified and deduplicated.\n+optional" + }, + "labelSelector": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", + "title": "Select all ClusterTrustBundles that match this label selector. Only has\neffect if signerName is set. Mutually-exclusive with name. If unset,\ninterpreted as \"match nothing\". If set but empty, interpreted as \"match\neverything\".\n+optional" + }, + "optional": { + "type": "boolean", + "title": "If true, don't block pod startup if the referenced ClusterTrustBundle(s)\naren't available. If using name, then the named ClusterTrustBundle is\nallowed not to exist. If using signerName, then the combination of\nsignerName and labelSelector is allowed to match zero\nClusterTrustBundles.\n+optional" + }, + "path": { + "type": "string", + "description": "Relative path from the volume root to write the bundle." + } + }, + "description": "ClusterTrustBundleProjection describes how to select a set of\nClusterTrustBundle objects and project their contents into the pod\nfilesystem." 
+ }, "k8s.io.api.core.v1.ConfigMapEnvSource": { "type": "object", "properties": { @@ -3135,6 +3288,17 @@ "$ref": "#/definitions/k8s.io.api.core.v1.ResourceRequirements", "title": "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n+optional" }, + "resizePolicy": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.ContainerResizePolicy" + }, + "title": "Resources resize policy for the container.\n+featureGate=InPlacePodVerticalScaling\n+optional\n+listType=atomic" + }, + "restartPolicy": { + "type": "string", + "title": "RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted.\n+featureGate=SidecarContainers\n+optional" + }, "volumeMounts": { "type": "array", "items": { @@ -3224,6 +3388,20 @@ }, "description": "ContainerPort represents a network port in a single container." 
}, + "k8s.io.api.core.v1.ContainerResizePolicy": { + "type": "object", + "properties": { + "resourceName": { + "type": "string", + "description": "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." + }, + "restartPolicy": { + "type": "string", + "description": "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." + } + }, + "description": "ContainerResizePolicy represents resource resize policy for the container." + }, "k8s.io.api.core.v1.DownwardAPIProjection": { "type": "object", "properties": { @@ -3287,7 +3465,7 @@ }, "sizeLimit": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity", - "title": "sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: http://kubernetes.io/docs/user-guide/volumes#emptydir\n+optional" + "title": "sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" } }, "description": "Represents an empty directory for a pod.\nEmpty directory volumes support ownership management and SELinux relabeling." @@ -3418,6 +3596,17 @@ "$ref": "#/definitions/k8s.io.api.core.v1.ResourceRequirements", "title": "Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources\nalready allocated to the pod.\n+optional" }, + "resizePolicy": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.ContainerResizePolicy" + }, + "title": "Resources resize policy for the container.\n+featureGate=InPlacePodVerticalScaling\n+optional\n+listType=atomic" + }, + "restartPolicy": { + "type": "string", + "title": "Restart policy for the container to manage the restart behavior of each\ncontainer within a pod.\nThis may only be set for init containers. You cannot set this field on\nephemeral containers.\n+featureGate=SidecarContainers\n+optional" + }, "volumeMounts": { "type": "array", "items": { @@ -3685,7 +3874,7 @@ "properties": { "name": { "type": "string", - "title": "The header field name" + "description": "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." }, "value": { "type": "string", @@ -3826,6 +4015,10 @@ "tcpSocket": { "$ref": "#/definitions/k8s.io.api.core.v1.TCPSocketAction", "title": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.\n+optional" + }, + "sleep": { + "$ref": "#/definitions/k8s.io.api.core.v1.SleepAction", + "title": "Sleep represents the duration that the container should sleep before being terminated.\n+featureGate=PodLifecycleSleepAction\n+optional" } }, "description": "LifecycleHandler defines a specific action that should be taken in a lifecycle\nhook. One and only one of the fields, except TCPSocket must be specified." 
@@ -3958,7 +4151,7 @@ "title": "selector is a label query over volumes to consider for binding.\n+optional" }, "resources": { - "$ref": "#/definitions/k8s.io.api.core.v1.ResourceRequirements", + "$ref": "#/definitions/k8s.io.api.core.v1.VolumeResourceRequirements", "title": "resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional" }, "volumeName": { @@ -3975,11 +4168,15 @@ }, "dataSource": { "$ref": "#/definitions/k8s.io.api.core.v1.TypedLocalObjectReference", - "title": "dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nIf the AnyVolumeDataSource feature gate is enabled, this field will always have\nthe same contents as the DataSourceRef field.\n+optional" + "title": "dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource.\n+optional" }, "dataSourceRef": { - "$ref": 
"#/definitions/k8s.io.api.core.v1.TypedLocalObjectReference", - "title": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. This may be any local object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the DataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, both fields (DataSource and DataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nThere are two important differences between DataSource and DataSourceRef:\n* While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n+optional" + "$ref": "#/definitions/k8s.io.api.core.v1.TypedObjectReference", + "title": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. 
For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\n+optional" + }, + "volumeAttributesClassName": { + "type": "string", + "title": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\nIf specified, the CSI driver will create or update the volume with the attributes defined\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\nit can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\nwill be set by the persistentvolume controller if it exists.\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\nexists.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.\n+featureGate=VolumeAttributesClass\n+optional" } }, "title": "PersistentVolumeClaimSpec describes the common attributes of storage devices\nand allows a Source for provider-specific attributes" @@ -4051,7 +4248,7 @@ "properties": { "labelSelector": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", - "title": "A label query over a set of resources, in this case pods.\n+optional" + "title": "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods.\n+optional" }, "namespaces": { "type": "array", @@ -4067,6 +4264,20 @@ "namespaceSelector": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector", "title": "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces.\n+optional" + }, + "matchLabelKeys": { + "type": "array", + "items": { + "type": "string" + }, + "title": "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\n+listType=atomic\n+optional" + }, + "mismatchLabelKeys": { + "type": "array", + "items": { + "type": "string" + }, + "title": "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. 
The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.\n+listType=atomic\n+optional" } }, "title": "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key \u003ctopologyKey\u003e matches that of any node on which\na pod of the set of pods is running" @@ -4152,6 +4363,30 @@ }, "title": "PodReadinessGate contains the reference to a pod condition" }, + "k8s.io.api.core.v1.PodResourceClaim": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name uniquely identifies this resource claim inside the pod.\nThis must be a DNS_LABEL." + }, + "source": { + "$ref": "#/definitions/k8s.io.api.core.v1.ClaimSource", + "description": "Source describes where to find the ResourceClaim." + } + }, + "description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource.\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod.\nContainers that need access to the ResourceClaim reference it with this name." + }, + "k8s.io.api.core.v1.PodSchedulingGate": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the scheduling gate.\nEach scheduling gate must have a unique name field." + } + }, + "description": "PodSchedulingGate is associated to a Pod to guard its scheduling." + }, "k8s.io.api.core.v1.PodSecurityContext": { "type": "object", "properties": { @@ -4183,7 +4418,7 @@ "type": "string", "format": "int64" }, - "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. 
If unspecified, no groups will be added to\nany container.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" + "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows.\n+optional" }, "fsGroup": { "type": "string", @@ -4242,7 +4477,7 @@ }, "restartPolicy": { "type": "string", - "title": "Restart policy for all containers within the pod.\nOne of Always, OnFailure, Never.\nDefault to Always.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n+optional" + "title": "Restart policy for all containers within the pod.\nOne of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.\nDefault to Always.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy\n+optional" }, "terminationGracePeriodSeconds": { "type": "string", @@ -4395,6 +4630,20 @@ "hostUsers": { "type": "boolean", "title": "Use the host's user namespace.\nOptional: Default to true.\nIf set to true or not present, the pod will be run in the host user namespace, useful\nfor when the pod needs a feature only available to the host user namespace, such as\nloading a kernel module with CAP_SYS_MODULE.\nWhen set to false, a new userns is created for the pod. 
Setting false is useful for\nmitigating container breakout vulnerabilities even allowing users to run their\ncontainers as root without actually having root privileges on the host.\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.\n+k8s:conversion-gen=false\n+optional" + }, + "schedulingGates": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodSchedulingGate" + }, + "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\nscheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.\n\n+patchMergeKey=name\n+patchStrategy=merge\n+listType=map\n+listMapKey=name\n+featureGate=PodSchedulingReadiness\n+optional" + }, + "resourceClaims": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.PodResourceClaim" + }, + "description": "ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\nThis field is immutable.\n\n+patchMergeKey=name\n+patchStrategy=merge,retainKeys\n+listType=map\n+listMapKey=name\n+featureGate=DynamicResourceAllocation\n+optional" } }, "description": "PodSpec is a description of a pod." 
@@ -4503,7 +4752,7 @@ }, "grpc": { "$ref": "#/definitions/k8s.io.api.core.v1.GRPCAction", - "title": "GRPC specifies an action involving a GRPC port.\nThis is a beta field and requires enabling GRPCContainerProbe feature gate.\n+featureGate=GRPCContainerProbe\n+optional" + "title": "GRPC specifies an action involving a GRPC port.\n+optional" } }, "description": "ProbeHandler defines a specific action that should be taken in a probe.\nOne and only one of the fields must be specified." @@ -4597,6 +4846,16 @@ }, "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod.\nRBD volumes support ownership management and SELinux relabeling." }, + "k8s.io.api.core.v1.ResourceClaim": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." + } + }, + "description": "ResourceClaim references one entry in PodSpec.ResourceClaims." + }, "k8s.io.api.core.v1.ResourceFieldSelector": { "type": "object", "properties": { @@ -4630,7 +4889,14 @@ "additionalProperties": { "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" }, - "title": "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n+optional" + "title": "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. 
Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n+optional" + }, + "claims": { + "type": "array", + "items": { + "$ref": "#/definitions/k8s.io.api.core.v1.ResourceClaim" + }, + "description": "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.\n\n+listType=map\n+listMapKey=name\n+featureGate=DynamicResourceAllocation\n+optional" } }, "description": "ResourceRequirements describes the compute resource requirements." @@ -4713,7 +4979,7 @@ }, "localhostProfile": { "type": "string", - "title": "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust only be set if type is \"Localhost\".\n+optional" + "title": "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type.\n+optional" } }, "title": "SeccompProfile defines a pod/container's seccomp profile settings.\nOnly one profile source may be set.\n+union" @@ -4867,6 +5133,17 @@ }, "description": "ServiceAccountTokenProjection represents a projected service account token\nvolume. This projection can be used to insert a service account token into\nthe pods runtime filesystem for use against APIs (Kubernetes API Server or\notherwise)." 
}, + "k8s.io.api.core.v1.SleepAction": { + "type": "object", + "properties": { + "seconds": { + "type": "string", + "format": "int64", + "description": "Seconds is the number of seconds to sleep." + } + }, + "description": "SleepAction describes a \"sleep\" action." + }, "k8s.io.api.core.v1.StorageOSVolumeSource": { "type": "object", "properties": { @@ -4975,18 +5252,18 @@ }, "nodeAffinityPolicy": { "type": "string", - "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector\nwhen calculating pod topology spread skew. Options are:\n- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.\n- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.\nThis is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\n+optional" + "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector\nwhen calculating pod topology spread skew. Options are:\n- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.\n- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\n+optional" }, "nodeTaintsPolicy": { "type": "string", - "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating\npod topology spread skew. Options are:\n- Honor: nodes without taints, along with tainted nodes for which the incoming pod\nhas a toleration, are included.\n- Ignore: node taints are ignored. 
All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.\nThis is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\n+optional" + "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating\npod topology spread skew. Options are:\n- Honor: nodes without taints, along with tainted nodes for which the incoming pod\nhas a toleration, are included.\n- Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.\n+optional" }, "matchLabelKeys": { "type": "array", "items": { "type": "string" }, - "title": "MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. Keys that don't exist in the incoming pod labels will\nbe ignored. A null or empty list means only match against labelSelector.\n+listType=atomic\n+optional" + "description": "MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nMatchLabelKeys cannot be set when LabelSelector isn't set.\nKeys that don't exist in the incoming pod labels will\nbe ignored. 
A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).\n+listType=atomic\n+optional" } }, "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology." @@ -5009,6 +5286,27 @@ }, "title": "TypedLocalObjectReference contains enough information to let you locate the\ntyped referenced object inside the same namespace.\n+structType=atomic" }, + "k8s.io.api.core.v1.TypedObjectReference": { + "type": "object", + "properties": { + "apiGroup": { + "type": "string", + "title": "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.\n+optional" + }, + "kind": { + "type": "string", + "title": "Kind is the type of resource being referenced" + }, + "name": { + "type": "string", + "title": "Name is the name of resource being referenced" + }, + "namespace": { + "type": "string", + "title": "Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. 
See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.\n+featureGate=CrossNamespaceVolumeDataSource\n+optional" + } + } + }, "k8s.io.api.core.v1.Volume": { "type": "object", "properties": { @@ -5085,10 +5383,34 @@ "serviceAccountToken": { "$ref": "#/definitions/k8s.io.api.core.v1.ServiceAccountTokenProjection", "title": "serviceAccountToken is information about the serviceAccountToken data to project\n+optional" + }, + "clusterTrustBundle": { + "$ref": "#/definitions/k8s.io.api.core.v1.ClusterTrustBundleProjection", + "description": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\nof ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the\ncombination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written\ninto the pod filesystem. Esoteric PEM features such as inter-block\ncomments and block headers are stripped. 
Certificates are deduplicated.\nThe ordering of certificates within the file is arbitrary, and Kubelet\nmay change the order over time.\n\n+featureGate=ClusterTrustBundleProjection\n+optional" } }, "title": "Projection that may be projected along with other supported volume types" }, + "k8s.io.api.core.v1.VolumeResourceRequirements": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" + }, + "title": "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n+optional" + }, + "requests": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/k8s.io.apimachinery.pkg.api.resource.Quantity" + }, + "title": "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/\n+optional" + } + }, + "description": "VolumeResourceRequirements describes the storage resource requirements for a volume." + }, "k8s.io.api.core.v1.VolumeSource": { "type": "object", "properties": { @@ -5265,7 +5587,7 @@ }, "hostProcess": { "type": "boolean", - "title": "HostProcess determines if a container should be run as a 'Host Process' container.\nThis field is alpha-level and will only be honored by components that enable the\nWindowsHostProcessContainers feature flag. Setting this field without the feature\nflag will result in errors when validating the Pod. All of a Pod's containers must\nhave the same effective HostProcess value (it is not allowed to have a mix of HostProcess\ncontainers and non-HostProcess containers). 
In addition, if HostProcess is true\nthen HostNetwork must also be set to true.\n+optional" + "title": "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true.\n+optional" } }, "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials." @@ -5315,7 +5637,7 @@ "properties": { "key": { "type": "string", - "title": "key is the label key that the selector applies to.\n+patchMergeKey=key\n+patchStrategy=merge" + "description": "key is the label key that the selector applies to." }, "operator": { "type": "string", @@ -5370,7 +5692,7 @@ "properties": { "name": { "type": "string", - "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional" + "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names\n+optional" }, "generateName": { "type": "string", @@ -5378,7 +5700,7 @@ }, "namespace": { "type": "string", - "description": "Namespace defines the space within which each name must be unique. 
An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional" + "description": "Namespace defines the space within which each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces\n+optional" }, "selfLink": { "type": "string", @@ -5386,7 +5708,7 @@ }, "uid": { "type": "string", - "description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional" + "description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids\n+optional" }, "resourceVersion": { "type": "string", @@ -5415,14 +5737,14 @@ "additionalProperties": { "type": "string" }, - "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. 
May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional" + "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\n+optional" }, "annotations": { "type": "object", "additionalProperties": { "type": "string" }, - "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional" + "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations\n+optional" }, "ownerReferences": { "type": "array", @@ -5461,11 +5783,11 @@ }, "name": { "type": "string", - "title": "Name of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names" + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names" }, "uid": { "type": "string", - "title": "UID of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids" + "title": "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids" }, "controller": { "type": "boolean", @@ -5803,6 +6125,12 @@ }, "pong": { "type": "boolean" + }, + "initContainerImages": { + "type": "array", + "items": { + "type": "string" + } } } }, @@ -5910,6 +6238,12 @@ "items": { "$ref": 
"#/definitions/github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.CanaryStep" } + }, + "initContainers": { + "type": "array", + "items": { + "$ref": "#/definitions/rollout.ContainerInfo" + } } } }, diff --git a/pkg/apis/api-rules/violation_exceptions.list b/pkg/apis/api-rules/violation_exceptions.list index 1d968d19e0..ea9a241837 100644 --- a/pkg/apis/api-rules/violation_exceptions.list +++ b/pkg/apis/api-rules/violation_exceptions.list @@ -9,6 +9,7 @@ API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,DryRun API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,MeasurementRetention API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,Metrics +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AnalysisTemplateSpec,Templates API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,ApisixRoute,Rules API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,AppMeshVirtualService,Routes API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,CanaryStrategy,Steps @@ -29,12 +30,14 @@ API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,KayentaMetric,Scopes API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,MetricResult,Measurements API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,NginxTrafficRouting,StableIngresses +API rule violation: 
list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,OAuth2Config,Scopes API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,PrometheusMetric,Headers API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,Args API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,DryRun API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,MeasurementRetention API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutAnalysis,Templates API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,Analyses +API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,DryRun API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStep,Templates API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutExperimentStepAnalysisTemplateRef,Args API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutStatus,ALBs @@ -46,7 +49,10 @@ API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,TLSRoute,SNIHosts API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,TrafficWeights,Additional API rule violation: list_type_missing,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,WebMetric,Headers +API rule violation: names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,Authentication,OAuth2 API rule violation: 
names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,MetricProvider,SkyWalking +API rule violation: names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,OAuth2Config,ClientID +API rule violation: names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,OAuth2Config,TokenURL API rule violation: names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutStatus,ALBs API rule violation: names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,RolloutStatus,HPAReplicas API rule violation: names_match,github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1,Sigv4Config,RoleARN diff --git a/pkg/apis/rollouts/v1alpha1/analysis_types.go b/pkg/apis/rollouts/v1alpha1/analysis_types.go index d7d1685d04..3a287ec856 100644 --- a/pkg/apis/rollouts/v1alpha1/analysis_types.go +++ b/pkg/apis/rollouts/v1alpha1/analysis_types.go @@ -56,7 +56,7 @@ type AnalysisTemplateSpec struct { // Metrics contains the list of metrics to query as part of an analysis run // +patchMergeKey=name // +patchStrategy=merge - Metrics []Metric `json:"metrics" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=metrics"` + Metrics []Metric `json:"metrics,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=metrics"` // Args are the list of arguments to the template // +patchMergeKey=name // +patchStrategy=merge @@ -72,6 +72,10 @@ type AnalysisTemplateSpec struct { // +patchStrategy=merge // +optional MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,4,rep,name=measurementRetention"` + // Templates reference to a list of analysis templates to combine with the rest of the metrics for an AnalysisRun + // +patchMergeKey=templateName + // +patchStrategy=merge + Templates []AnalysisTemplateRef `json:"templates,omitempty" patchStrategy:"merge" patchMergeKey:"templateName" 
protobuf:"bytes,5,rep,name=templates"` } // DurationString is a string representing a duration (e.g. 30s, 5m, 1h) @@ -134,6 +138,16 @@ type MeasurementRetention struct { Limit int32 `json:"limit" protobuf:"varint,2,opt,name=limit"` } +// TTLStrategy defines the strategy for the time to live depending on if the analysis succeeded or failed +type TTLStrategy struct { + // SecondsAfterCompletion is the number of seconds to live after completion. + SecondsAfterCompletion *int32 `json:"secondsAfterCompletion,omitempty" protobuf:"varint,1,opt,name=secondsAfterCompletion"` + // SecondsAfterFailure is the number of seconds to live after failure. + SecondsAfterFailure *int32 `json:"secondsAfterFailure,omitempty" protobuf:"varint,2,opt,name=secondsAfterFailure"` + // SecondsAfterSuccess is the number of seconds to live after success. + SecondsAfterSuccess *int32 `json:"secondsAfterSuccess,omitempty" protobuf:"varint,3,opt,name=secondsAfterSuccess"` +} + // EffectiveCount is the effective count based on whether or not count/interval is specified // If neither count or interval is specified, the effective count is 1 // If only interval is specified, metric runs indefinitely and there is no effective count (nil) @@ -210,9 +224,9 @@ type PrometheusMetric struct { Address string `json:"address,omitempty" protobuf:"bytes,1,opt,name=address"` // Query is a raw prometheus query to perform Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` - // Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus + // Authentication details // +optional - Authentication PrometheusAuth `json:"authentication,omitempty" protobuf:"bytes,3,opt,name=authentication"` + Authentication Authentication `json:"authentication,omitempty" protobuf:"bytes,3,opt,name=authentication"` // Timeout represents the duration within which a prometheus query should complete. It is expressed in seconds. 
// +optional Timeout *int64 `json:"timeout,omitempty" protobuf:"bytes,4,opt,name=timeout"` @@ -225,10 +239,26 @@ type PrometheusMetric struct { Headers []WebMetricHeader `json:"headers,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,6,opt,name=headers"` } -// PrometheusMetric defines the prometheus query to perform canary analysis -type PrometheusAuth struct { +// Authentication method +type Authentication struct { + // Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus + // +optional + Sigv4 Sigv4Config `json:"sigv4,omitempty" protobuf:"bytes,1,opt,name=sigv4"` + // OAuth2 config // +optional - Sigv4 Sigv4Config `json:"sigv4,omitempty" protobuf:"bytes,3,opt,name=sigv4"` + OAuth2 OAuth2Config `json:"oauth2,omitempty" protobuf:"bytes,2,opt,name=oauth2"` +} + +type OAuth2Config struct { + // OAuth2 provider token URL + TokenURL string `json:"tokenUrl,omitempty" protobuf:"bytes,1,name=tokenUrl"` + // OAuth2 client ID + ClientID string `json:"clientId,omitempty" protobuf:"bytes,2,name=clientId"` + // OAuth2 client secret + ClientSecret string `json:"clientSecret,omitempty" protobuf:"bytes,3,name=clientSecret"` + // OAuth2 scopes + // +optional + Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,opt,name=scopes"` } type Sigv4Config struct { @@ -362,6 +392,9 @@ type AnalysisRunSpec struct { // +patchStrategy=merge // +optional MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,5,rep,name=measurementRetention"` + // TTLStrategy object contains the strategy for the time to live depending on if the analysis succeeded or failed + // +optional + TTLStrategy *TTLStrategy `json:"ttlStrategy,omitempty" protobuf:"bytes,6,opt,name=ttlStrategy"` } // Argument is an argument to an AnalysisRun @@ -380,8 +413,8 @@ type ValueFrom struct { // Secret is a reference to where a secret is stored. 
This field is one of the fields with valueFrom // +optional SecretKeyRef *SecretKeyRef `json:"secretKeyRef,omitempty" protobuf:"bytes,1,opt,name=secretKeyRef"` - //FieldRef is a reference to the fields in metadata which we are referencing. This field is one of the fields with - //valueFrom + // FieldRef is a reference to the fields in metadata which we are referencing. This field is one of the fields with + // valueFrom // +optional FieldRef *FieldRef `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` } @@ -407,6 +440,8 @@ type AnalysisRunStatus struct { RunSummary RunSummary `json:"runSummary,omitempty" protobuf:"bytes,5,opt,name=runSummary"` // DryRunSummary contains the final results from the metric executions in the dry-run mode DryRunSummary *RunSummary `json:"dryRunSummary,omitempty" protobuf:"bytes,6,opt,name=dryRunSummary"` + // CompletedAt indicates when the analysisRun completed + CompletedAt *metav1.Time `json:"completedAt,omitempty" protobuf:"bytes,7,opt,name=completedAt"` } // RunSummary contains the final results from the metric executions @@ -532,6 +567,9 @@ type WebMetric struct { // +kubebuilder:validation:Type=object // JSONBody is the body of the web metric in a json format (method must be POST/PUT) JSONBody json.RawMessage `json:"jsonBody,omitempty" protobuf:"bytes,8,opt,name=jsonBody,casttype=encoding/json.RawMessage"` + // Authentication details + // +optional + Authentication Authentication `json:"authentication,omitempty" protobuf:"bytes,9,opt,name=authentication"` } // WebMetricMethod is the available HTTP methods @@ -550,8 +588,20 @@ type WebMetricHeader struct { } type DatadogMetric struct { + // +kubebuilder:default="5m" + // Interval refers to the Interval time window in Datadog (default: 5m). Not to be confused with the polling rate for the metric. 
Interval DurationString `json:"interval,omitempty" protobuf:"bytes,1,opt,name=interval,casttype=DurationString"` - Query string `json:"query" protobuf:"bytes,2,opt,name=query"` + Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` + // Queries is a map of query_name_as_key: query. You can then use query_name_as_key inside Formula.Used for v2 + // +kubebuilder:validation:Type=object + Queries map[string]string `json:"queries,omitempty" protobuf:"bytes,3,opt,name=queries"` + // Formula refers to the Formula made up of the queries. Only useful with Queries. Used for v2 + Formula string `json:"formula,omitempty" protobuf:"bytes,4,opt,name=formula"` // ApiVersion refers to the Datadog API version being used (default: v1). v1 will eventually be deprecated. - ApiVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` + // +kubebuilder:validation:Enum=v1;v2 + // +kubebuilder:default=v1 + ApiVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` + // +kubebuilder:validation:Enum=avg;min;max;sum;last;percentile;mean;l2norm;area + // Aggregator is a type of aggregator to use for metrics-based queries (default: ""). 
Used for v2 + Aggregator string `json:"aggregator,omitempty" protobuf:"bytes,6,opt,name=aggregator"` } diff --git a/pkg/apis/rollouts/v1alpha1/experiment_types.go b/pkg/apis/rollouts/v1alpha1/experiment_types.go index b7183cbc45..3151b65681 100644 --- a/pkg/apis/rollouts/v1alpha1/experiment_types.go +++ b/pkg/apis/rollouts/v1alpha1/experiment_types.go @@ -65,6 +65,9 @@ type ExperimentSpec struct { // +patchStrategy=merge // +optional MeasurementRetention []MeasurementRetention `json:"measurementRetention,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,8,rep,name=measurementRetention"` + // AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns + // +optional + AnalysisRunMetadata AnalysisRunMetadata `json:"analysisRunMetadata,omitempty" protobuf:"bytes,9,opt,name=analysisRunMetadata"` } type TemplateSpec struct { diff --git a/pkg/apis/rollouts/v1alpha1/generated.pb.go b/pkg/apis/rollouts/v1alpha1/generated.pb.go index 26c7e722a0..cab169a9d0 100644 --- a/pkg/apis/rollouts/v1alpha1/generated.pb.go +++ b/pkg/apis/rollouts/v1alpha1/generated.pb.go @@ -1,6 +1,4 @@ /* -Copyright 2024 The Kubernetes sample-controller Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -385,10 +383,38 @@ func (m *AnalysisTemplateList) XXX_DiscardUnknown() { var xxx_messageInfo_AnalysisTemplateList proto.InternalMessageInfo +func (m *AnalysisTemplateRef) Reset() { *m = AnalysisTemplateRef{} } +func (*AnalysisTemplateRef) ProtoMessage() {} +func (*AnalysisTemplateRef) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{12} +} +func (m *AnalysisTemplateRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AnalysisTemplateRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AnalysisTemplateRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_AnalysisTemplateRef.Merge(m, src) +} +func (m *AnalysisTemplateRef) XXX_Size() int { + return m.Size() +} +func (m *AnalysisTemplateRef) XXX_DiscardUnknown() { + xxx_messageInfo_AnalysisTemplateRef.DiscardUnknown(m) +} + +var xxx_messageInfo_AnalysisTemplateRef proto.InternalMessageInfo + func (m *AnalysisTemplateSpec) Reset() { *m = AnalysisTemplateSpec{} } func (*AnalysisTemplateSpec) ProtoMessage() {} func (*AnalysisTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{12} + return fileDescriptor_e0e705f843545fab, []int{13} } func (m *AnalysisTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +442,7 @@ var xxx_messageInfo_AnalysisTemplateSpec proto.InternalMessageInfo func (m *AntiAffinity) Reset() { *m = AntiAffinity{} } func (*AntiAffinity) ProtoMessage() {} func (*AntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{13} + return fileDescriptor_e0e705f843545fab, []int{14} } func (m *AntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +470,7 @@ var xxx_messageInfo_AntiAffinity proto.InternalMessageInfo func (m *ApisixRoute) Reset() { *m = ApisixRoute{} 
} func (*ApisixRoute) ProtoMessage() {} func (*ApisixRoute) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{14} + return fileDescriptor_e0e705f843545fab, []int{15} } func (m *ApisixRoute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +498,7 @@ var xxx_messageInfo_ApisixRoute proto.InternalMessageInfo func (m *ApisixTrafficRouting) Reset() { *m = ApisixTrafficRouting{} } func (*ApisixTrafficRouting) ProtoMessage() {} func (*ApisixTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{15} + return fileDescriptor_e0e705f843545fab, []int{16} } func (m *ApisixTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +526,7 @@ var xxx_messageInfo_ApisixTrafficRouting proto.InternalMessageInfo func (m *AppMeshTrafficRouting) Reset() { *m = AppMeshTrafficRouting{} } func (*AppMeshTrafficRouting) ProtoMessage() {} func (*AppMeshTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{16} + return fileDescriptor_e0e705f843545fab, []int{17} } func (m *AppMeshTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +554,7 @@ var xxx_messageInfo_AppMeshTrafficRouting proto.InternalMessageInfo func (m *AppMeshVirtualNodeGroup) Reset() { *m = AppMeshVirtualNodeGroup{} } func (*AppMeshVirtualNodeGroup) ProtoMessage() {} func (*AppMeshVirtualNodeGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{17} + return fileDescriptor_e0e705f843545fab, []int{18} } func (m *AppMeshVirtualNodeGroup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +582,7 @@ var xxx_messageInfo_AppMeshVirtualNodeGroup proto.InternalMessageInfo func (m *AppMeshVirtualNodeReference) Reset() { *m = AppMeshVirtualNodeReference{} } func (*AppMeshVirtualNodeReference) ProtoMessage() {} func (*AppMeshVirtualNodeReference) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, 
[]int{18} + return fileDescriptor_e0e705f843545fab, []int{19} } func (m *AppMeshVirtualNodeReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +610,7 @@ var xxx_messageInfo_AppMeshVirtualNodeReference proto.InternalMessageInfo func (m *AppMeshVirtualService) Reset() { *m = AppMeshVirtualService{} } func (*AppMeshVirtualService) ProtoMessage() {} func (*AppMeshVirtualService) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{19} + return fileDescriptor_e0e705f843545fab, []int{20} } func (m *AppMeshVirtualService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +638,7 @@ var xxx_messageInfo_AppMeshVirtualService proto.InternalMessageInfo func (m *Argument) Reset() { *m = Argument{} } func (*Argument) ProtoMessage() {} func (*Argument) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{20} + return fileDescriptor_e0e705f843545fab, []int{21} } func (m *Argument) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +666,7 @@ var xxx_messageInfo_Argument proto.InternalMessageInfo func (m *ArgumentValueFrom) Reset() { *m = ArgumentValueFrom{} } func (*ArgumentValueFrom) ProtoMessage() {} func (*ArgumentValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{21} + return fileDescriptor_e0e705f843545fab, []int{22} } func (m *ArgumentValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -665,10 +691,38 @@ func (m *ArgumentValueFrom) XXX_DiscardUnknown() { var xxx_messageInfo_ArgumentValueFrom proto.InternalMessageInfo +func (m *Authentication) Reset() { *m = Authentication{} } +func (*Authentication) ProtoMessage() {} +func (*Authentication) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{23} +} +func (m *Authentication) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Authentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Authentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_Authentication.Merge(m, src) +} +func (m *Authentication) XXX_Size() int { + return m.Size() +} +func (m *Authentication) XXX_DiscardUnknown() { + xxx_messageInfo_Authentication.DiscardUnknown(m) +} + +var xxx_messageInfo_Authentication proto.InternalMessageInfo + func (m *AwsResourceRef) Reset() { *m = AwsResourceRef{} } func (*AwsResourceRef) ProtoMessage() {} func (*AwsResourceRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{22} + return fileDescriptor_e0e705f843545fab, []int{24} } func (m *AwsResourceRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +750,7 @@ var xxx_messageInfo_AwsResourceRef proto.InternalMessageInfo func (m *BlueGreenStatus) Reset() { *m = BlueGreenStatus{} } func (*BlueGreenStatus) ProtoMessage() {} func (*BlueGreenStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{23} + return fileDescriptor_e0e705f843545fab, []int{25} } func (m *BlueGreenStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +778,7 @@ var xxx_messageInfo_BlueGreenStatus proto.InternalMessageInfo func (m *BlueGreenStrategy) Reset() { *m = BlueGreenStrategy{} } func (*BlueGreenStrategy) ProtoMessage() {} func (*BlueGreenStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{24} + return fileDescriptor_e0e705f843545fab, []int{26} } func (m *BlueGreenStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +806,7 @@ var xxx_messageInfo_BlueGreenStrategy proto.InternalMessageInfo func (m *CanaryStatus) Reset() { *m = CanaryStatus{} } func (*CanaryStatus) ProtoMessage() {} func (*CanaryStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{25} + return fileDescriptor_e0e705f843545fab, []int{27} } func (m *CanaryStatus) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +834,7 @@ var xxx_messageInfo_CanaryStatus proto.InternalMessageInfo func (m *CanaryStep) Reset() { *m = CanaryStep{} } func (*CanaryStep) ProtoMessage() {} func (*CanaryStep) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{26} + return fileDescriptor_e0e705f843545fab, []int{28} } func (m *CanaryStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +862,7 @@ var xxx_messageInfo_CanaryStep proto.InternalMessageInfo func (m *CanaryStrategy) Reset() { *m = CanaryStrategy{} } func (*CanaryStrategy) ProtoMessage() {} func (*CanaryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{27} + return fileDescriptor_e0e705f843545fab, []int{29} } func (m *CanaryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +890,7 @@ var xxx_messageInfo_CanaryStrategy proto.InternalMessageInfo func (m *CloudWatchMetric) Reset() { *m = CloudWatchMetric{} } func (*CloudWatchMetric) ProtoMessage() {} func (*CloudWatchMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{28} + return fileDescriptor_e0e705f843545fab, []int{30} } func (m *CloudWatchMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +918,7 @@ var xxx_messageInfo_CloudWatchMetric proto.InternalMessageInfo func (m *CloudWatchMetricDataQuery) Reset() { *m = CloudWatchMetricDataQuery{} } func (*CloudWatchMetricDataQuery) ProtoMessage() {} func (*CloudWatchMetricDataQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{29} + return fileDescriptor_e0e705f843545fab, []int{31} } func (m *CloudWatchMetricDataQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +946,7 @@ var xxx_messageInfo_CloudWatchMetricDataQuery proto.InternalMessageInfo func (m *CloudWatchMetricStat) Reset() { *m = CloudWatchMetricStat{} } func (*CloudWatchMetricStat) ProtoMessage() {} func 
(*CloudWatchMetricStat) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{30} + return fileDescriptor_e0e705f843545fab, []int{32} } func (m *CloudWatchMetricStat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +974,7 @@ var xxx_messageInfo_CloudWatchMetricStat proto.InternalMessageInfo func (m *CloudWatchMetricStatMetric) Reset() { *m = CloudWatchMetricStatMetric{} } func (*CloudWatchMetricStatMetric) ProtoMessage() {} func (*CloudWatchMetricStatMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{31} + return fileDescriptor_e0e705f843545fab, []int{33} } func (m *CloudWatchMetricStatMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +1002,7 @@ var xxx_messageInfo_CloudWatchMetricStatMetric proto.InternalMessageInfo func (m *CloudWatchMetricStatMetricDimension) Reset() { *m = CloudWatchMetricStatMetricDimension{} } func (*CloudWatchMetricStatMetricDimension) ProtoMessage() {} func (*CloudWatchMetricStatMetricDimension) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{32} + return fileDescriptor_e0e705f843545fab, []int{34} } func (m *CloudWatchMetricStatMetricDimension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1030,7 @@ var xxx_messageInfo_CloudWatchMetricStatMetricDimension proto.InternalMessageInf func (m *ClusterAnalysisTemplate) Reset() { *m = ClusterAnalysisTemplate{} } func (*ClusterAnalysisTemplate) ProtoMessage() {} func (*ClusterAnalysisTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{33} + return fileDescriptor_e0e705f843545fab, []int{35} } func (m *ClusterAnalysisTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1058,7 @@ var xxx_messageInfo_ClusterAnalysisTemplate proto.InternalMessageInfo func (m *ClusterAnalysisTemplateList) Reset() { *m = ClusterAnalysisTemplateList{} } func (*ClusterAnalysisTemplateList) ProtoMessage() {} func 
(*ClusterAnalysisTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{34} + return fileDescriptor_e0e705f843545fab, []int{36} } func (m *ClusterAnalysisTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1086,7 @@ var xxx_messageInfo_ClusterAnalysisTemplateList proto.InternalMessageInfo func (m *DatadogMetric) Reset() { *m = DatadogMetric{} } func (*DatadogMetric) ProtoMessage() {} func (*DatadogMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{35} + return fileDescriptor_e0e705f843545fab, []int{37} } func (m *DatadogMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1114,7 @@ var xxx_messageInfo_DatadogMetric proto.InternalMessageInfo func (m *DryRun) Reset() { *m = DryRun{} } func (*DryRun) ProtoMessage() {} func (*DryRun) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{36} + return fileDescriptor_e0e705f843545fab, []int{38} } func (m *DryRun) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1142,7 @@ var xxx_messageInfo_DryRun proto.InternalMessageInfo func (m *Experiment) Reset() { *m = Experiment{} } func (*Experiment) ProtoMessage() {} func (*Experiment) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{37} + return fileDescriptor_e0e705f843545fab, []int{39} } func (m *Experiment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1170,7 @@ var xxx_messageInfo_Experiment proto.InternalMessageInfo func (m *ExperimentAnalysisRunStatus) Reset() { *m = ExperimentAnalysisRunStatus{} } func (*ExperimentAnalysisRunStatus) ProtoMessage() {} func (*ExperimentAnalysisRunStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{38} + return fileDescriptor_e0e705f843545fab, []int{40} } func (m *ExperimentAnalysisRunStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1198,7 @@ var 
xxx_messageInfo_ExperimentAnalysisRunStatus proto.InternalMessageInfo func (m *ExperimentAnalysisTemplateRef) Reset() { *m = ExperimentAnalysisTemplateRef{} } func (*ExperimentAnalysisTemplateRef) ProtoMessage() {} func (*ExperimentAnalysisTemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{39} + return fileDescriptor_e0e705f843545fab, []int{41} } func (m *ExperimentAnalysisTemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1226,7 @@ var xxx_messageInfo_ExperimentAnalysisTemplateRef proto.InternalMessageInfo func (m *ExperimentCondition) Reset() { *m = ExperimentCondition{} } func (*ExperimentCondition) ProtoMessage() {} func (*ExperimentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{40} + return fileDescriptor_e0e705f843545fab, []int{42} } func (m *ExperimentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1254,7 @@ var xxx_messageInfo_ExperimentCondition proto.InternalMessageInfo func (m *ExperimentList) Reset() { *m = ExperimentList{} } func (*ExperimentList) ProtoMessage() {} func (*ExperimentList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{41} + return fileDescriptor_e0e705f843545fab, []int{43} } func (m *ExperimentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1282,7 @@ var xxx_messageInfo_ExperimentList proto.InternalMessageInfo func (m *ExperimentSpec) Reset() { *m = ExperimentSpec{} } func (*ExperimentSpec) ProtoMessage() {} func (*ExperimentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{42} + return fileDescriptor_e0e705f843545fab, []int{44} } func (m *ExperimentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1310,7 @@ var xxx_messageInfo_ExperimentSpec proto.InternalMessageInfo func (m *ExperimentStatus) Reset() { *m = ExperimentStatus{} } func (*ExperimentStatus) ProtoMessage() {} func 
(*ExperimentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{43} + return fileDescriptor_e0e705f843545fab, []int{45} } func (m *ExperimentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1338,7 @@ var xxx_messageInfo_ExperimentStatus proto.InternalMessageInfo func (m *FieldRef) Reset() { *m = FieldRef{} } func (*FieldRef) ProtoMessage() {} func (*FieldRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{44} + return fileDescriptor_e0e705f843545fab, []int{46} } func (m *FieldRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1366,7 @@ var xxx_messageInfo_FieldRef proto.InternalMessageInfo func (m *GraphiteMetric) Reset() { *m = GraphiteMetric{} } func (*GraphiteMetric) ProtoMessage() {} func (*GraphiteMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{45} + return fileDescriptor_e0e705f843545fab, []int{47} } func (m *GraphiteMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1394,7 @@ var xxx_messageInfo_GraphiteMetric proto.InternalMessageInfo func (m *HeaderRoutingMatch) Reset() { *m = HeaderRoutingMatch{} } func (*HeaderRoutingMatch) ProtoMessage() {} func (*HeaderRoutingMatch) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{46} + return fileDescriptor_e0e705f843545fab, []int{48} } func (m *HeaderRoutingMatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1422,7 @@ var xxx_messageInfo_HeaderRoutingMatch proto.InternalMessageInfo func (m *InfluxdbMetric) Reset() { *m = InfluxdbMetric{} } func (*InfluxdbMetric) ProtoMessage() {} func (*InfluxdbMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{47} + return fileDescriptor_e0e705f843545fab, []int{49} } func (m *InfluxdbMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1450,7 @@ var xxx_messageInfo_InfluxdbMetric proto.InternalMessageInfo 
func (m *IstioDestinationRule) Reset() { *m = IstioDestinationRule{} } func (*IstioDestinationRule) ProtoMessage() {} func (*IstioDestinationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{48} + return fileDescriptor_e0e705f843545fab, []int{50} } func (m *IstioDestinationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1478,7 @@ var xxx_messageInfo_IstioDestinationRule proto.InternalMessageInfo func (m *IstioTrafficRouting) Reset() { *m = IstioTrafficRouting{} } func (*IstioTrafficRouting) ProtoMessage() {} func (*IstioTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{49} + return fileDescriptor_e0e705f843545fab, []int{51} } func (m *IstioTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1506,7 @@ var xxx_messageInfo_IstioTrafficRouting proto.InternalMessageInfo func (m *IstioVirtualService) Reset() { *m = IstioVirtualService{} } func (*IstioVirtualService) ProtoMessage() {} func (*IstioVirtualService) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{50} + return fileDescriptor_e0e705f843545fab, []int{52} } func (m *IstioVirtualService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1534,7 @@ var xxx_messageInfo_IstioVirtualService proto.InternalMessageInfo func (m *JobMetric) Reset() { *m = JobMetric{} } func (*JobMetric) ProtoMessage() {} func (*JobMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{51} + return fileDescriptor_e0e705f843545fab, []int{53} } func (m *JobMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1562,7 @@ var xxx_messageInfo_JobMetric proto.InternalMessageInfo func (m *KayentaMetric) Reset() { *m = KayentaMetric{} } func (*KayentaMetric) ProtoMessage() {} func (*KayentaMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{52} + return fileDescriptor_e0e705f843545fab, 
[]int{54} } func (m *KayentaMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1590,7 @@ var xxx_messageInfo_KayentaMetric proto.InternalMessageInfo func (m *KayentaScope) Reset() { *m = KayentaScope{} } func (*KayentaScope) ProtoMessage() {} func (*KayentaScope) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{53} + return fileDescriptor_e0e705f843545fab, []int{55} } func (m *KayentaScope) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1618,7 @@ var xxx_messageInfo_KayentaScope proto.InternalMessageInfo func (m *KayentaThreshold) Reset() { *m = KayentaThreshold{} } func (*KayentaThreshold) ProtoMessage() {} func (*KayentaThreshold) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{54} + return fileDescriptor_e0e705f843545fab, []int{56} } func (m *KayentaThreshold) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1646,7 @@ var xxx_messageInfo_KayentaThreshold proto.InternalMessageInfo func (m *MangedRoutes) Reset() { *m = MangedRoutes{} } func (*MangedRoutes) ProtoMessage() {} func (*MangedRoutes) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{55} + return fileDescriptor_e0e705f843545fab, []int{57} } func (m *MangedRoutes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1674,7 @@ var xxx_messageInfo_MangedRoutes proto.InternalMessageInfo func (m *Measurement) Reset() { *m = Measurement{} } func (*Measurement) ProtoMessage() {} func (*Measurement) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{56} + return fileDescriptor_e0e705f843545fab, []int{58} } func (m *Measurement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1702,7 @@ var xxx_messageInfo_Measurement proto.InternalMessageInfo func (m *MeasurementRetention) Reset() { *m = MeasurementRetention{} } func (*MeasurementRetention) ProtoMessage() {} func (*MeasurementRetention) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_e0e705f843545fab, []int{57} + return fileDescriptor_e0e705f843545fab, []int{59} } func (m *MeasurementRetention) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1730,7 @@ var xxx_messageInfo_MeasurementRetention proto.InternalMessageInfo func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{58} + return fileDescriptor_e0e705f843545fab, []int{60} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1758,7 @@ var xxx_messageInfo_Metric proto.InternalMessageInfo func (m *MetricProvider) Reset() { *m = MetricProvider{} } func (*MetricProvider) ProtoMessage() {} func (*MetricProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{59} + return fileDescriptor_e0e705f843545fab, []int{61} } func (m *MetricProvider) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1786,7 @@ var xxx_messageInfo_MetricProvider proto.InternalMessageInfo func (m *MetricResult) Reset() { *m = MetricResult{} } func (*MetricResult) ProtoMessage() {} func (*MetricResult) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{60} + return fileDescriptor_e0e705f843545fab, []int{62} } func (m *MetricResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1814,7 @@ var xxx_messageInfo_MetricResult proto.InternalMessageInfo func (m *NewRelicMetric) Reset() { *m = NewRelicMetric{} } func (*NewRelicMetric) ProtoMessage() {} func (*NewRelicMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{61} + return fileDescriptor_e0e705f843545fab, []int{63} } func (m *NewRelicMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1842,7 @@ var xxx_messageInfo_NewRelicMetric proto.InternalMessageInfo func (m *NginxTrafficRouting) Reset() { *m = NginxTrafficRouting{} } func 
(*NginxTrafficRouting) ProtoMessage() {} func (*NginxTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{62} + return fileDescriptor_e0e705f843545fab, []int{64} } func (m *NginxTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1813,10 +1867,38 @@ func (m *NginxTrafficRouting) XXX_DiscardUnknown() { var xxx_messageInfo_NginxTrafficRouting proto.InternalMessageInfo +func (m *OAuth2Config) Reset() { *m = OAuth2Config{} } +func (*OAuth2Config) ProtoMessage() {} +func (*OAuth2Config) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{65} +} +func (m *OAuth2Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuth2Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuth2Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuth2Config.Merge(m, src) +} +func (m *OAuth2Config) XXX_Size() int { + return m.Size() +} +func (m *OAuth2Config) XXX_DiscardUnknown() { + xxx_messageInfo_OAuth2Config.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuth2Config proto.InternalMessageInfo + func (m *ObjectRef) Reset() { *m = ObjectRef{} } func (*ObjectRef) ProtoMessage() {} func (*ObjectRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{63} + return fileDescriptor_e0e705f843545fab, []int{66} } func (m *ObjectRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1926,7 @@ var xxx_messageInfo_ObjectRef proto.InternalMessageInfo func (m *PauseCondition) Reset() { *m = PauseCondition{} } func (*PauseCondition) ProtoMessage() {} func (*PauseCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{64} + return fileDescriptor_e0e705f843545fab, []int{67} } func (m *PauseCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1954,7 
@@ var xxx_messageInfo_PauseCondition proto.InternalMessageInfo func (m *PingPongSpec) Reset() { *m = PingPongSpec{} } func (*PingPongSpec) ProtoMessage() {} func (*PingPongSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{65} + return fileDescriptor_e0e705f843545fab, []int{68} } func (m *PingPongSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1982,7 @@ var xxx_messageInfo_PingPongSpec proto.InternalMessageInfo func (m *PodTemplateMetadata) Reset() { *m = PodTemplateMetadata{} } func (*PodTemplateMetadata) ProtoMessage() {} func (*PodTemplateMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{66} + return fileDescriptor_e0e705f843545fab, []int{69} } func (m *PodTemplateMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1930,7 +2012,7 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Reset() { } func (*PreferredDuringSchedulingIgnoredDuringExecution) ProtoMessage() {} func (*PreferredDuringSchedulingIgnoredDuringExecution) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{67} + return fileDescriptor_e0e705f843545fab, []int{70} } func (m *PreferredDuringSchedulingIgnoredDuringExecution) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1955,38 +2037,10 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) XXX_DiscardUnknown() { var xxx_messageInfo_PreferredDuringSchedulingIgnoredDuringExecution proto.InternalMessageInfo -func (m *PrometheusAuth) Reset() { *m = PrometheusAuth{} } -func (*PrometheusAuth) ProtoMessage() {} -func (*PrometheusAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{68} -} -func (m *PrometheusAuth) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrometheusAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return 
b[:n], nil -} -func (m *PrometheusAuth) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrometheusAuth.Merge(m, src) -} -func (m *PrometheusAuth) XXX_Size() int { - return m.Size() -} -func (m *PrometheusAuth) XXX_DiscardUnknown() { - xxx_messageInfo_PrometheusAuth.DiscardUnknown(m) -} - -var xxx_messageInfo_PrometheusAuth proto.InternalMessageInfo - func (m *PrometheusMetric) Reset() { *m = PrometheusMetric{} } func (*PrometheusMetric) ProtoMessage() {} func (*PrometheusMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{69} + return fileDescriptor_e0e705f843545fab, []int{71} } func (m *PrometheusMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2016,7 +2070,7 @@ func (m *RequiredDuringSchedulingIgnoredDuringExecution) Reset() { } func (*RequiredDuringSchedulingIgnoredDuringExecution) ProtoMessage() {} func (*RequiredDuringSchedulingIgnoredDuringExecution) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{70} + return fileDescriptor_e0e705f843545fab, []int{72} } func (m *RequiredDuringSchedulingIgnoredDuringExecution) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2044,7 +2098,7 @@ var xxx_messageInfo_RequiredDuringSchedulingIgnoredDuringExecution proto.Interna func (m *RollbackWindowSpec) Reset() { *m = RollbackWindowSpec{} } func (*RollbackWindowSpec) ProtoMessage() {} func (*RollbackWindowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{71} + return fileDescriptor_e0e705f843545fab, []int{73} } func (m *RollbackWindowSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2072,7 +2126,7 @@ var xxx_messageInfo_RollbackWindowSpec proto.InternalMessageInfo func (m *Rollout) Reset() { *m = Rollout{} } func (*Rollout) ProtoMessage() {} func (*Rollout) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{72} + return fileDescriptor_e0e705f843545fab, []int{74} } func (m *Rollout) XXX_Unmarshal(b []byte) error 
{ return m.Unmarshal(b) @@ -2100,7 +2154,7 @@ var xxx_messageInfo_Rollout proto.InternalMessageInfo func (m *RolloutAnalysis) Reset() { *m = RolloutAnalysis{} } func (*RolloutAnalysis) ProtoMessage() {} func (*RolloutAnalysis) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{73} + return fileDescriptor_e0e705f843545fab, []int{75} } func (m *RolloutAnalysis) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2128,7 +2182,7 @@ var xxx_messageInfo_RolloutAnalysis proto.InternalMessageInfo func (m *RolloutAnalysisBackground) Reset() { *m = RolloutAnalysisBackground{} } func (*RolloutAnalysisBackground) ProtoMessage() {} func (*RolloutAnalysisBackground) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{74} + return fileDescriptor_e0e705f843545fab, []int{76} } func (m *RolloutAnalysisBackground) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2156,7 +2210,7 @@ var xxx_messageInfo_RolloutAnalysisBackground proto.InternalMessageInfo func (m *RolloutAnalysisRunStatus) Reset() { *m = RolloutAnalysisRunStatus{} } func (*RolloutAnalysisRunStatus) ProtoMessage() {} func (*RolloutAnalysisRunStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{75} + return fileDescriptor_e0e705f843545fab, []int{77} } func (m *RolloutAnalysisRunStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2181,38 +2235,10 @@ func (m *RolloutAnalysisRunStatus) XXX_DiscardUnknown() { var xxx_messageInfo_RolloutAnalysisRunStatus proto.InternalMessageInfo -func (m *RolloutAnalysisTemplate) Reset() { *m = RolloutAnalysisTemplate{} } -func (*RolloutAnalysisTemplate) ProtoMessage() {} -func (*RolloutAnalysisTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{76} -} -func (m *RolloutAnalysisTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RolloutAnalysisTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b 
= b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RolloutAnalysisTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_RolloutAnalysisTemplate.Merge(m, src) -} -func (m *RolloutAnalysisTemplate) XXX_Size() int { - return m.Size() -} -func (m *RolloutAnalysisTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_RolloutAnalysisTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_RolloutAnalysisTemplate proto.InternalMessageInfo - func (m *RolloutCondition) Reset() { *m = RolloutCondition{} } func (*RolloutCondition) ProtoMessage() {} func (*RolloutCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{77} + return fileDescriptor_e0e705f843545fab, []int{78} } func (m *RolloutCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2240,7 +2266,7 @@ var xxx_messageInfo_RolloutCondition proto.InternalMessageInfo func (m *RolloutExperimentStep) Reset() { *m = RolloutExperimentStep{} } func (*RolloutExperimentStep) ProtoMessage() {} func (*RolloutExperimentStep) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{78} + return fileDescriptor_e0e705f843545fab, []int{79} } func (m *RolloutExperimentStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2270,7 +2296,7 @@ func (m *RolloutExperimentStepAnalysisTemplateRef) Reset() { } func (*RolloutExperimentStepAnalysisTemplateRef) ProtoMessage() {} func (*RolloutExperimentStepAnalysisTemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{79} + return fileDescriptor_e0e705f843545fab, []int{80} } func (m *RolloutExperimentStepAnalysisTemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2298,7 +2324,7 @@ var xxx_messageInfo_RolloutExperimentStepAnalysisTemplateRef proto.InternalMessa func (m *RolloutExperimentTemplate) Reset() { *m = RolloutExperimentTemplate{} } func (*RolloutExperimentTemplate) ProtoMessage() {} func 
(*RolloutExperimentTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{80} + return fileDescriptor_e0e705f843545fab, []int{81} } func (m *RolloutExperimentTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2326,7 +2352,7 @@ var xxx_messageInfo_RolloutExperimentTemplate proto.InternalMessageInfo func (m *RolloutList) Reset() { *m = RolloutList{} } func (*RolloutList) ProtoMessage() {} func (*RolloutList) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{81} + return fileDescriptor_e0e705f843545fab, []int{82} } func (m *RolloutList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2354,7 +2380,7 @@ var xxx_messageInfo_RolloutList proto.InternalMessageInfo func (m *RolloutPause) Reset() { *m = RolloutPause{} } func (*RolloutPause) ProtoMessage() {} func (*RolloutPause) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{82} + return fileDescriptor_e0e705f843545fab, []int{83} } func (m *RolloutPause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2382,7 +2408,7 @@ var xxx_messageInfo_RolloutPause proto.InternalMessageInfo func (m *RolloutSpec) Reset() { *m = RolloutSpec{} } func (*RolloutSpec) ProtoMessage() {} func (*RolloutSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{83} + return fileDescriptor_e0e705f843545fab, []int{84} } func (m *RolloutSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2410,7 +2436,7 @@ var xxx_messageInfo_RolloutSpec proto.InternalMessageInfo func (m *RolloutStatus) Reset() { *m = RolloutStatus{} } func (*RolloutStatus) ProtoMessage() {} func (*RolloutStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{84} + return fileDescriptor_e0e705f843545fab, []int{85} } func (m *RolloutStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2438,7 +2464,7 @@ var xxx_messageInfo_RolloutStatus proto.InternalMessageInfo func (m 
*RolloutStrategy) Reset() { *m = RolloutStrategy{} } func (*RolloutStrategy) ProtoMessage() {} func (*RolloutStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{85} + return fileDescriptor_e0e705f843545fab, []int{86} } func (m *RolloutStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2466,7 +2492,7 @@ var xxx_messageInfo_RolloutStrategy proto.InternalMessageInfo func (m *RolloutTrafficRouting) Reset() { *m = RolloutTrafficRouting{} } func (*RolloutTrafficRouting) ProtoMessage() {} func (*RolloutTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{86} + return fileDescriptor_e0e705f843545fab, []int{87} } func (m *RolloutTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2494,7 +2520,7 @@ var xxx_messageInfo_RolloutTrafficRouting proto.InternalMessageInfo func (m *RouteMatch) Reset() { *m = RouteMatch{} } func (*RouteMatch) ProtoMessage() {} func (*RouteMatch) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{87} + return fileDescriptor_e0e705f843545fab, []int{88} } func (m *RouteMatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2522,7 +2548,7 @@ var xxx_messageInfo_RouteMatch proto.InternalMessageInfo func (m *RunSummary) Reset() { *m = RunSummary{} } func (*RunSummary) ProtoMessage() {} func (*RunSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{88} + return fileDescriptor_e0e705f843545fab, []int{89} } func (m *RunSummary) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2550,7 +2576,7 @@ var xxx_messageInfo_RunSummary proto.InternalMessageInfo func (m *SMITrafficRouting) Reset() { *m = SMITrafficRouting{} } func (*SMITrafficRouting) ProtoMessage() {} func (*SMITrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{89} + return fileDescriptor_e0e705f843545fab, []int{90} } func (m *SMITrafficRouting) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -2578,7 +2604,7 @@ var xxx_messageInfo_SMITrafficRouting proto.InternalMessageInfo func (m *ScopeDetail) Reset() { *m = ScopeDetail{} } func (*ScopeDetail) ProtoMessage() {} func (*ScopeDetail) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{90} + return fileDescriptor_e0e705f843545fab, []int{91} } func (m *ScopeDetail) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2606,7 +2632,7 @@ var xxx_messageInfo_ScopeDetail proto.InternalMessageInfo func (m *SecretKeyRef) Reset() { *m = SecretKeyRef{} } func (*SecretKeyRef) ProtoMessage() {} func (*SecretKeyRef) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{91} + return fileDescriptor_e0e705f843545fab, []int{92} } func (m *SecretKeyRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2634,7 +2660,7 @@ var xxx_messageInfo_SecretKeyRef proto.InternalMessageInfo func (m *SetCanaryScale) Reset() { *m = SetCanaryScale{} } func (*SetCanaryScale) ProtoMessage() {} func (*SetCanaryScale) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{92} + return fileDescriptor_e0e705f843545fab, []int{93} } func (m *SetCanaryScale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2662,7 +2688,7 @@ var xxx_messageInfo_SetCanaryScale proto.InternalMessageInfo func (m *SetHeaderRoute) Reset() { *m = SetHeaderRoute{} } func (*SetHeaderRoute) ProtoMessage() {} func (*SetHeaderRoute) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{93} + return fileDescriptor_e0e705f843545fab, []int{94} } func (m *SetHeaderRoute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2690,7 +2716,7 @@ var xxx_messageInfo_SetHeaderRoute proto.InternalMessageInfo func (m *SetMirrorRoute) Reset() { *m = SetMirrorRoute{} } func (*SetMirrorRoute) ProtoMessage() {} func (*SetMirrorRoute) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{94} + return 
fileDescriptor_e0e705f843545fab, []int{95} } func (m *SetMirrorRoute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2718,7 +2744,7 @@ var xxx_messageInfo_SetMirrorRoute proto.InternalMessageInfo func (m *Sigv4Config) Reset() { *m = Sigv4Config{} } func (*Sigv4Config) ProtoMessage() {} func (*Sigv4Config) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{95} + return fileDescriptor_e0e705f843545fab, []int{96} } func (m *Sigv4Config) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2746,7 +2772,7 @@ var xxx_messageInfo_Sigv4Config proto.InternalMessageInfo func (m *SkyWalkingMetric) Reset() { *m = SkyWalkingMetric{} } func (*SkyWalkingMetric) ProtoMessage() {} func (*SkyWalkingMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{96} + return fileDescriptor_e0e705f843545fab, []int{97} } func (m *SkyWalkingMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2774,7 +2800,7 @@ var xxx_messageInfo_SkyWalkingMetric proto.InternalMessageInfo func (m *StickinessConfig) Reset() { *m = StickinessConfig{} } func (*StickinessConfig) ProtoMessage() {} func (*StickinessConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{97} + return fileDescriptor_e0e705f843545fab, []int{98} } func (m *StickinessConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2802,7 +2828,7 @@ var xxx_messageInfo_StickinessConfig proto.InternalMessageInfo func (m *StringMatch) Reset() { *m = StringMatch{} } func (*StringMatch) ProtoMessage() {} func (*StringMatch) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{98} + return fileDescriptor_e0e705f843545fab, []int{99} } func (m *StringMatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2830,7 +2856,7 @@ var xxx_messageInfo_StringMatch proto.InternalMessageInfo func (m *TCPRoute) Reset() { *m = TCPRoute{} } func (*TCPRoute) ProtoMessage() {} func (*TCPRoute) Descriptor() 
([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{99} + return fileDescriptor_e0e705f843545fab, []int{100} } func (m *TCPRoute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2858,7 +2884,7 @@ var xxx_messageInfo_TCPRoute proto.InternalMessageInfo func (m *TLSRoute) Reset() { *m = TLSRoute{} } func (*TLSRoute) ProtoMessage() {} func (*TLSRoute) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{100} + return fileDescriptor_e0e705f843545fab, []int{101} } func (m *TLSRoute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2883,10 +2909,38 @@ func (m *TLSRoute) XXX_DiscardUnknown() { var xxx_messageInfo_TLSRoute proto.InternalMessageInfo +func (m *TTLStrategy) Reset() { *m = TTLStrategy{} } +func (*TTLStrategy) ProtoMessage() {} +func (*TTLStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e0e705f843545fab, []int{102} +} +func (m *TTLStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TTLStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TTLStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TTLStrategy.Merge(m, src) +} +func (m *TTLStrategy) XXX_Size() int { + return m.Size() +} +func (m *TTLStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_TTLStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_TTLStrategy proto.InternalMessageInfo + func (m *TemplateService) Reset() { *m = TemplateService{} } func (*TemplateService) ProtoMessage() {} func (*TemplateService) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{101} + return fileDescriptor_e0e705f843545fab, []int{103} } func (m *TemplateService) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2914,7 +2968,7 @@ var xxx_messageInfo_TemplateService proto.InternalMessageInfo func (m *TemplateSpec) Reset() { *m = TemplateSpec{} } 
func (*TemplateSpec) ProtoMessage() {} func (*TemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{102} + return fileDescriptor_e0e705f843545fab, []int{104} } func (m *TemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2942,7 +2996,7 @@ var xxx_messageInfo_TemplateSpec proto.InternalMessageInfo func (m *TemplateStatus) Reset() { *m = TemplateStatus{} } func (*TemplateStatus) ProtoMessage() {} func (*TemplateStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{103} + return fileDescriptor_e0e705f843545fab, []int{105} } func (m *TemplateStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2970,7 +3024,7 @@ var xxx_messageInfo_TemplateStatus proto.InternalMessageInfo func (m *TraefikTrafficRouting) Reset() { *m = TraefikTrafficRouting{} } func (*TraefikTrafficRouting) ProtoMessage() {} func (*TraefikTrafficRouting) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{104} + return fileDescriptor_e0e705f843545fab, []int{106} } func (m *TraefikTrafficRouting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2998,7 +3052,7 @@ var xxx_messageInfo_TraefikTrafficRouting proto.InternalMessageInfo func (m *TrafficWeights) Reset() { *m = TrafficWeights{} } func (*TrafficWeights) ProtoMessage() {} func (*TrafficWeights) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{105} + return fileDescriptor_e0e705f843545fab, []int{107} } func (m *TrafficWeights) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3026,7 +3080,7 @@ var xxx_messageInfo_TrafficWeights proto.InternalMessageInfo func (m *ValueFrom) Reset() { *m = ValueFrom{} } func (*ValueFrom) ProtoMessage() {} func (*ValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{106} + return fileDescriptor_e0e705f843545fab, []int{108} } func (m *ValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3054,7 
+3108,7 @@ var xxx_messageInfo_ValueFrom proto.InternalMessageInfo func (m *WavefrontMetric) Reset() { *m = WavefrontMetric{} } func (*WavefrontMetric) ProtoMessage() {} func (*WavefrontMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{107} + return fileDescriptor_e0e705f843545fab, []int{109} } func (m *WavefrontMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3082,7 +3136,7 @@ var xxx_messageInfo_WavefrontMetric proto.InternalMessageInfo func (m *WebMetric) Reset() { *m = WebMetric{} } func (*WebMetric) ProtoMessage() {} func (*WebMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{108} + return fileDescriptor_e0e705f843545fab, []int{110} } func (m *WebMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3110,7 +3164,7 @@ var xxx_messageInfo_WebMetric proto.InternalMessageInfo func (m *WebMetricHeader) Reset() { *m = WebMetricHeader{} } func (*WebMetricHeader) ProtoMessage() {} func (*WebMetricHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{109} + return fileDescriptor_e0e705f843545fab, []int{111} } func (m *WebMetricHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3138,7 +3192,7 @@ var xxx_messageInfo_WebMetricHeader proto.InternalMessageInfo func (m *WeightDestination) Reset() { *m = WeightDestination{} } func (*WeightDestination) ProtoMessage() {} func (*WeightDestination) Descriptor() ([]byte, []int) { - return fileDescriptor_e0e705f843545fab, []int{110} + return fileDescriptor_e0e705f843545fab, []int{112} } func (m *WeightDestination) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3178,6 +3232,7 @@ func init() { proto.RegisterType((*AnalysisRunStrategy)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisRunStrategy") proto.RegisterType((*AnalysisTemplate)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplate") 
proto.RegisterType((*AnalysisTemplateList)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateList") + proto.RegisterType((*AnalysisTemplateRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateRef") proto.RegisterType((*AnalysisTemplateSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AnalysisTemplateSpec") proto.RegisterType((*AntiAffinity)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AntiAffinity") proto.RegisterType((*ApisixRoute)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ApisixRoute") @@ -3188,6 +3243,7 @@ func init() { proto.RegisterType((*AppMeshVirtualService)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AppMeshVirtualService") proto.RegisterType((*Argument)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Argument") proto.RegisterType((*ArgumentValueFrom)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ArgumentValueFrom") + proto.RegisterType((*Authentication)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Authentication") proto.RegisterType((*AwsResourceRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.AwsResourceRef") proto.RegisterType((*BlueGreenStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.BlueGreenStatus") proto.RegisterType((*BlueGreenStrategy)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.BlueGreenStrategy") @@ -3202,6 +3258,7 @@ func init() { proto.RegisterType((*ClusterAnalysisTemplate)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ClusterAnalysisTemplate") proto.RegisterType((*ClusterAnalysisTemplateList)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ClusterAnalysisTemplateList") proto.RegisterType((*DatadogMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DatadogMetric") + 
proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DatadogMetric.QueriesEntry") proto.RegisterType((*DryRun)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.DryRun") proto.RegisterType((*Experiment)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.Experiment") proto.RegisterType((*ExperimentAnalysisRunStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ExperimentAnalysisRunStatus") @@ -3233,6 +3290,7 @@ func init() { proto.RegisterType((*NewRelicMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NewRelicMetric") proto.RegisterType((*NginxTrafficRouting)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NginxTrafficRouting") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.NginxTrafficRouting.AdditionalIngressAnnotationsEntry") + proto.RegisterType((*OAuth2Config)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.OAuth2Config") proto.RegisterType((*ObjectRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.ObjectRef") proto.RegisterType((*PauseCondition)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PauseCondition") proto.RegisterType((*PingPongSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PingPongSpec") @@ -3240,7 +3298,6 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PodTemplateMetadata.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PodTemplateMetadata.LabelsEntry") proto.RegisterType((*PreferredDuringSchedulingIgnoredDuringExecution)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PreferredDuringSchedulingIgnoredDuringExecution") - proto.RegisterType((*PrometheusAuth)(nil), 
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusAuth") proto.RegisterType((*PrometheusMetric)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.PrometheusMetric") proto.RegisterType((*RequiredDuringSchedulingIgnoredDuringExecution)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RequiredDuringSchedulingIgnoredDuringExecution") proto.RegisterType((*RollbackWindowSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RollbackWindowSpec") @@ -3248,7 +3305,6 @@ func init() { proto.RegisterType((*RolloutAnalysis)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutAnalysis") proto.RegisterType((*RolloutAnalysisBackground)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutAnalysisBackground") proto.RegisterType((*RolloutAnalysisRunStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutAnalysisRunStatus") - proto.RegisterType((*RolloutAnalysisTemplate)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutAnalysisTemplate") proto.RegisterType((*RolloutCondition)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutCondition") proto.RegisterType((*RolloutExperimentStep)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutExperimentStep") proto.RegisterType((*RolloutExperimentStepAnalysisTemplateRef)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.RolloutExperimentStepAnalysisTemplateRef") @@ -3275,6 +3331,7 @@ func init() { proto.RegisterType((*StringMatch)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.StringMatch") proto.RegisterType((*TCPRoute)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TCPRoute") proto.RegisterType((*TLSRoute)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TLSRoute") + proto.RegisterType((*TTLStrategy)(nil), 
"github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TTLStrategy") proto.RegisterType((*TemplateService)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateService") proto.RegisterType((*TemplateSpec)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateSpec") proto.RegisterType((*TemplateStatus)(nil), "github.com.argoproj.argo_rollouts.pkg.apis.rollouts.v1alpha1.TemplateStatus") @@ -3292,521 +3349,543 @@ func init() { } var fileDescriptor_e0e705f843545fab = []byte{ - // 8214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x6c, 0x24, 0xc9, - 0x75, 0xd8, 0x35, 0x87, 0x43, 0x72, 0x1e, 0xb9, 0x24, 0xb7, 0x76, 0x57, 0xcb, 0xe3, 0xdd, 0xee, - 0x9c, 0xfa, 0x9c, 0xcb, 0xca, 0x3a, 0x91, 0xd2, 0x7d, 0x24, 0x27, 0x9d, 0x72, 0xc9, 0x0c, 0xb9, - 0x7b, 0xcb, 0x3d, 0x72, 0x6f, 0xf6, 0x0d, 0xf7, 0x56, 0x96, 0x74, 0xb6, 0x9a, 0x33, 0xc5, 0x61, - 0x2f, 0x67, 0xba, 0xc7, 0xdd, 0x3d, 0xdc, 0xe5, 0xe9, 0x60, 0x9d, 0x2c, 0x9c, 0xa2, 0x18, 0x12, - 0xac, 0xc4, 0x16, 0x82, 0x20, 0x41, 0xa0, 0x18, 0x06, 0xec, 0xc4, 0xfe, 0x25, 0x24, 0xc8, 0x1f, - 0x03, 0x11, 0xe2, 0xd8, 0xd6, 0x8f, 0x38, 0x90, 0x7f, 0x24, 0xb2, 0x03, 0x98, 0x8e, 0xa8, 0xfc, - 0x49, 0x90, 0x40, 0x08, 0xe0, 0x20, 0xf0, 0xfe, 0x08, 0x82, 0xfa, 0xec, 0xea, 0x9e, 0x1e, 0xee, - 0xcc, 0x4e, 0x73, 0x75, 0x49, 0xfc, 0x6f, 0xa6, 0x5e, 0xd5, 0x7b, 0xaf, 0xeb, 0xe3, 0xd5, 0xab, - 0x57, 0xef, 0xbd, 0x82, 0xcd, 0x96, 0x1b, 0xed, 0xf5, 0x76, 0x56, 0x1a, 0x7e, 0x67, 0xd5, 0x09, - 0x5a, 0x7e, 0x37, 0xf0, 0xef, 0xf2, 0x1f, 0x1f, 0x0b, 0xfc, 0x76, 0xdb, 0xef, 0x45, 0xe1, 0x6a, - 0x77, 0xbf, 0xb5, 0xea, 0x74, 0xdd, 0x70, 0x55, 0x97, 0x1c, 0x7c, 0xc2, 0x69, 0x77, 0xf7, 0x9c, - 0x4f, 0xac, 0xb6, 0xa8, 0x47, 0x03, 0x27, 0xa2, 0xcd, 0x95, 0x6e, 0xe0, 0x47, 0x3e, 0xf9, 0x74, - 0x8c, 0x6d, 0x45, 0x61, 0xe3, 0x3f, 0x7e, 0x4e, 0xb5, 0x5d, 0xe9, 0xee, 0xb7, 0x56, 0x18, 0xb6, - 0x15, 0x5d, 0xa2, 0xb0, 0x2d, 0x7f, 0xcc, 0xe0, 
0xa5, 0xe5, 0xb7, 0xfc, 0x55, 0x8e, 0x74, 0xa7, - 0xb7, 0xcb, 0xff, 0xf1, 0x3f, 0xfc, 0x97, 0x20, 0xb6, 0xfc, 0xec, 0xfe, 0x2b, 0xe1, 0x8a, 0xeb, - 0x33, 0xde, 0x56, 0x77, 0x9c, 0xa8, 0xb1, 0xb7, 0x7a, 0xd0, 0xc7, 0xd1, 0xb2, 0x6d, 0x54, 0x6a, - 0xf8, 0x01, 0xcd, 0xaa, 0xf3, 0x52, 0x5c, 0xa7, 0xe3, 0x34, 0xf6, 0x5c, 0x8f, 0x06, 0x87, 0xf1, - 0x57, 0x77, 0x68, 0xe4, 0x64, 0xb5, 0x5a, 0x1d, 0xd4, 0x2a, 0xe8, 0x79, 0x91, 0xdb, 0xa1, 0x7d, - 0x0d, 0xfe, 0xda, 0xc3, 0x1a, 0x84, 0x8d, 0x3d, 0xda, 0x71, 0xfa, 0xda, 0xbd, 0x38, 0xa8, 0x5d, - 0x2f, 0x72, 0xdb, 0xab, 0xae, 0x17, 0x85, 0x51, 0x90, 0x6e, 0x64, 0xff, 0xb8, 0x00, 0xa5, 0xca, - 0x66, 0xb5, 0x1e, 0x39, 0x51, 0x2f, 0x24, 0x5f, 0xb5, 0x60, 0xae, 0xed, 0x3b, 0xcd, 0xaa, 0xd3, - 0x76, 0xbc, 0x06, 0x0d, 0x96, 0xac, 0x67, 0xac, 0x2b, 0xb3, 0x2f, 0x6c, 0xae, 0x8c, 0x33, 0x5e, - 0x2b, 0x95, 0x7b, 0x21, 0xd2, 0xd0, 0xef, 0x05, 0x0d, 0x8a, 0x74, 0xb7, 0x7a, 0xfe, 0x7b, 0x47, - 0xe5, 0x27, 0x8e, 0x8f, 0xca, 0x73, 0x9b, 0x06, 0x25, 0x4c, 0xd0, 0x25, 0xdf, 0xb2, 0xe0, 0x6c, - 0xc3, 0xf1, 0x9c, 0xe0, 0x70, 0xdb, 0x09, 0x5a, 0x34, 0x7a, 0x3d, 0xf0, 0x7b, 0xdd, 0xa5, 0x89, - 0x53, 0xe0, 0xe6, 0x49, 0xc9, 0xcd, 0xd9, 0xb5, 0x34, 0x39, 0xec, 0xe7, 0x80, 0xf3, 0x15, 0x46, - 0xce, 0x4e, 0x9b, 0x9a, 0x7c, 0x15, 0x4e, 0x93, 0xaf, 0x7a, 0x9a, 0x1c, 0xf6, 0x73, 0x40, 0x3e, - 0x02, 0xd3, 0xae, 0xd7, 0x0a, 0x68, 0x18, 0x2e, 0x4d, 0x3e, 0x63, 0x5d, 0x29, 0x55, 0x17, 0x64, - 0xf3, 0xe9, 0x0d, 0x51, 0x8c, 0x0a, 0x6e, 0x7f, 0xa7, 0x00, 0x67, 0x2b, 0x9b, 0xd5, 0xed, 0xc0, - 0xd9, 0xdd, 0x75, 0x1b, 0xe8, 0xf7, 0x22, 0xd7, 0x6b, 0x99, 0x08, 0xac, 0x93, 0x11, 0x90, 0x97, - 0x61, 0x36, 0xa4, 0xc1, 0x81, 0xdb, 0xa0, 0x35, 0x3f, 0x88, 0xf8, 0xa0, 0x14, 0xab, 0xe7, 0x64, - 0xf5, 0xd9, 0x7a, 0x0c, 0x42, 0xb3, 0x1e, 0x6b, 0x16, 0xf8, 0x7e, 0x24, 0xe1, 0xbc, 0xcf, 0x4a, - 0x71, 0x33, 0x8c, 0x41, 0x68, 0xd6, 0x23, 0xeb, 0xb0, 0xe8, 0x78, 0x9e, 0x1f, 0x39, 0x91, 0xeb, - 0x7b, 0xb5, 0x80, 0xee, 0xba, 0xf7, 0xe5, 0x27, 0x2e, 0xc9, 0xb6, 0x8b, 0x95, 0x14, 
0x1c, 0xfb, - 0x5a, 0x90, 0x6f, 0x5a, 0xb0, 0x18, 0x46, 0x6e, 0x63, 0xdf, 0xf5, 0x68, 0x18, 0xae, 0xf9, 0xde, - 0xae, 0xdb, 0x5a, 0x2a, 0xf2, 0x61, 0xbb, 0x39, 0xde, 0xb0, 0xd5, 0x53, 0x58, 0xab, 0xe7, 0x19, - 0x4b, 0xe9, 0x52, 0xec, 0xa3, 0x4e, 0x3e, 0x0a, 0x25, 0xd9, 0xa3, 0x34, 0x5c, 0x9a, 0x7a, 0xa6, - 0x70, 0xa5, 0x54, 0x3d, 0x73, 0x7c, 0x54, 0x2e, 0x6d, 0xa8, 0x42, 0x8c, 0xe1, 0xf6, 0x3a, 0x2c, - 0x55, 0x3a, 0x3b, 0x4e, 0x18, 0x3a, 0x4d, 0x3f, 0x48, 0x0d, 0xdd, 0x15, 0x98, 0xe9, 0x38, 0xdd, - 0xae, 0xeb, 0xb5, 0xd8, 0xd8, 0x31, 0x3c, 0x73, 0xc7, 0x47, 0xe5, 0x99, 0x2d, 0x59, 0x86, 0x1a, - 0x6a, 0xff, 0xc9, 0x04, 0xcc, 0x56, 0x3c, 0xa7, 0x7d, 0x18, 0xba, 0x21, 0xf6, 0x3c, 0xf2, 0x05, - 0x98, 0x61, 0x52, 0xab, 0xe9, 0x44, 0x8e, 0x5c, 0xe9, 0x1f, 0x5f, 0x11, 0x42, 0x64, 0xc5, 0x14, - 0x22, 0xf1, 0xe7, 0xb3, 0xda, 0x2b, 0x07, 0x9f, 0x58, 0x79, 0x73, 0xe7, 0x2e, 0x6d, 0x44, 0x5b, - 0x34, 0x72, 0xaa, 0x44, 0x8e, 0x02, 0xc4, 0x65, 0xa8, 0xb1, 0x12, 0x1f, 0x26, 0xc3, 0x2e, 0x6d, - 0xc8, 0x95, 0xbb, 0x35, 0xe6, 0x0a, 0x89, 0x59, 0xaf, 0x77, 0x69, 0xa3, 0x3a, 0x27, 0x49, 0x4f, - 0xb2, 0x7f, 0xc8, 0x09, 0x91, 0x7b, 0x30, 0x15, 0x72, 0x59, 0x26, 0x17, 0xe5, 0x9b, 0xf9, 0x91, - 0xe4, 0x68, 0xab, 0xf3, 0x92, 0xe8, 0x94, 0xf8, 0x8f, 0x92, 0x9c, 0xfd, 0x1f, 0x2d, 0x38, 0x67, - 0xd4, 0xae, 0x04, 0xad, 0x5e, 0x87, 0x7a, 0x11, 0x79, 0x06, 0x26, 0x3d, 0xa7, 0x43, 0xe5, 0xaa, - 0xd2, 0x2c, 0xdf, 0x74, 0x3a, 0x14, 0x39, 0x84, 0x3c, 0x0b, 0xc5, 0x03, 0xa7, 0xdd, 0xa3, 0xbc, - 0x93, 0x4a, 0xd5, 0x33, 0xb2, 0x4a, 0xf1, 0x2d, 0x56, 0x88, 0x02, 0x46, 0xde, 0x85, 0x12, 0xff, - 0x71, 0x2d, 0xf0, 0x3b, 0x39, 0x7d, 0x9a, 0xe4, 0xf0, 0x2d, 0x85, 0x56, 0x4c, 0x3f, 0xfd, 0x17, - 0x63, 0x82, 0xf6, 0x9f, 0x59, 0xb0, 0x60, 0x7c, 0xdc, 0xa6, 0x1b, 0x46, 0xe4, 0xf3, 0x7d, 0x93, - 0x67, 0x65, 0xb8, 0xc9, 0xc3, 0x5a, 0xf3, 0xa9, 0xb3, 0x28, 0xbf, 0x74, 0x46, 0x95, 0x18, 0x13, - 0xc7, 0x83, 0xa2, 0x1b, 0xd1, 0x4e, 0xb8, 0x34, 0xf1, 0x4c, 0xe1, 0xca, 0xec, 0x0b, 0x1b, 0xb9, - 0x0d, 0x63, 0xdc, 0xbf, 
0x1b, 0x0c, 0x3f, 0x0a, 0x32, 0xf6, 0x3f, 0x2f, 0x24, 0x86, 0x6f, 0x4b, - 0xf1, 0xf1, 0xbe, 0x05, 0x53, 0x6d, 0x67, 0x87, 0xb6, 0xc5, 0xda, 0x9a, 0x7d, 0xe1, 0xed, 0xdc, - 0x38, 0x51, 0x34, 0x56, 0x36, 0x39, 0xfe, 0xab, 0x5e, 0x14, 0x1c, 0xc6, 0xd3, 0x4b, 0x14, 0xa2, - 0x24, 0x4e, 0xfe, 0x81, 0x05, 0xb3, 0xb1, 0x54, 0x53, 0xdd, 0xb2, 0x93, 0x3f, 0x33, 0xb1, 0x30, - 0x95, 0x1c, 0x69, 0x11, 0x6d, 0x40, 0xd0, 0xe4, 0x65, 0xf9, 0x93, 0x30, 0x6b, 0x7c, 0x02, 0x59, - 0x84, 0xc2, 0x3e, 0x3d, 0x14, 0x13, 0x1e, 0xd9, 0x4f, 0x72, 0x3e, 0x31, 0xc3, 0xe5, 0x94, 0xfe, - 0xd4, 0xc4, 0x2b, 0xd6, 0xf2, 0x6b, 0xb0, 0x98, 0x26, 0x38, 0x4a, 0x7b, 0xfb, 0x3b, 0x93, 0x89, - 0x89, 0xc9, 0x04, 0x01, 0xf1, 0x61, 0xba, 0x43, 0xa3, 0xc0, 0x6d, 0xa8, 0x21, 0x5b, 0x1f, 0xaf, - 0x97, 0xb6, 0x38, 0xb2, 0x78, 0x43, 0x14, 0xff, 0x43, 0x54, 0x54, 0xc8, 0x1e, 0x4c, 0x3a, 0x41, - 0x4b, 0x8d, 0xc9, 0xb5, 0x7c, 0x96, 0x65, 0x2c, 0x2a, 0x2a, 0x41, 0x2b, 0x44, 0x4e, 0x81, 0xac, - 0x42, 0x29, 0xa2, 0x41, 0xc7, 0xf5, 0x9c, 0x48, 0xec, 0xa0, 0x33, 0xd5, 0xb3, 0xb2, 0x5a, 0x69, - 0x5b, 0x01, 0x30, 0xae, 0x43, 0xda, 0x30, 0xd5, 0x0c, 0x0e, 0xb1, 0xe7, 0x2d, 0x4d, 0xe6, 0xd1, - 0x15, 0xeb, 0x1c, 0x57, 0x3c, 0x49, 0xc5, 0x7f, 0x94, 0x34, 0xc8, 0xaf, 0x5b, 0x70, 0xbe, 0x43, - 0x9d, 0xb0, 0x17, 0x50, 0xf6, 0x09, 0x48, 0x23, 0xea, 0xb1, 0x81, 0x5d, 0x2a, 0x72, 0xe2, 0x38, - 0xee, 0x38, 0xf4, 0x63, 0xae, 0x3e, 0x2d, 0x59, 0x39, 0x9f, 0x05, 0xc5, 0x4c, 0x6e, 0xec, 0x3f, - 0x99, 0x84, 0xb3, 0x7d, 0x82, 0x9d, 0xbc, 0x04, 0xc5, 0xee, 0x9e, 0x13, 0x2a, 0x49, 0x7d, 0x59, - 0x89, 0x89, 0x1a, 0x2b, 0x7c, 0x70, 0x54, 0x3e, 0xa3, 0x9a, 0xf0, 0x02, 0x14, 0x95, 0x99, 0xde, - 0xd4, 0xa1, 0x61, 0xe8, 0xb4, 0x94, 0xf8, 0x36, 0xa6, 0x09, 0x2f, 0x46, 0x05, 0x27, 0x7f, 0xdb, - 0x82, 0x33, 0x62, 0xca, 0x20, 0x0d, 0x7b, 0xed, 0x88, 0x6d, 0x51, 0xac, 0x5b, 0x6e, 0xe4, 0x31, - 0x3d, 0x05, 0xca, 0xea, 0x05, 0x49, 0xfd, 0x8c, 0x59, 0x1a, 0x62, 0x92, 0x2e, 0xb9, 0x03, 0xa5, - 0x30, 0x72, 0x82, 0x88, 0x36, 0x2b, 0x11, 0x57, 0xa6, 0x66, 
0x5f, 0xf8, 0xe9, 0xe1, 0x64, 0xf7, - 0xb6, 0xdb, 0xa1, 0x62, 0x9f, 0xa8, 0x2b, 0x04, 0x18, 0xe3, 0x22, 0xef, 0x02, 0x04, 0x3d, 0xaf, - 0xde, 0xeb, 0x74, 0x9c, 0xe0, 0x50, 0xea, 0x57, 0xd7, 0xc7, 0xfb, 0x3c, 0xd4, 0xf8, 0x62, 0x55, - 0x23, 0x2e, 0x43, 0x83, 0x1e, 0xf9, 0xb2, 0x05, 0x67, 0xc4, 0x4c, 0x54, 0x1c, 0x4c, 0xe5, 0xcc, - 0xc1, 0x59, 0xd6, 0xb5, 0xeb, 0x26, 0x09, 0x4c, 0x52, 0xb4, 0xff, 0x7d, 0x52, 0x0d, 0xa8, 0x47, - 0xec, 0xb0, 0xd5, 0x3a, 0x24, 0x9f, 0x83, 0x27, 0xc3, 0x5e, 0xa3, 0x41, 0xc3, 0x70, 0xb7, 0xd7, - 0xc6, 0x9e, 0x77, 0xdd, 0x0d, 0x23, 0x3f, 0x38, 0xdc, 0x74, 0x3b, 0x6e, 0xc4, 0x67, 0x5c, 0xb1, - 0x7a, 0xe9, 0xf8, 0xa8, 0xfc, 0x64, 0x7d, 0x50, 0x25, 0x1c, 0xdc, 0x9e, 0x38, 0xf0, 0x54, 0xcf, - 0x1b, 0x8c, 0x5e, 0x68, 0xe8, 0xe5, 0xe3, 0xa3, 0xf2, 0x53, 0xb7, 0x07, 0x57, 0xc3, 0x93, 0x70, - 0xd8, 0xff, 0xd5, 0x62, 0x92, 0x5a, 0x7c, 0xd7, 0x36, 0xed, 0x74, 0xdb, 0x4c, 0xba, 0x9c, 0xbe, - 0xfe, 0x18, 0x25, 0xf4, 0x47, 0xcc, 0x67, 0xbb, 0x53, 0xfc, 0x0f, 0x52, 0x22, 0xed, 0xff, 0x62, - 0xc1, 0xf9, 0x74, 0xe5, 0xc7, 0xa0, 0xf3, 0x84, 0x49, 0x9d, 0xe7, 0x66, 0xbe, 0x5f, 0x3b, 0x40, - 0xf1, 0xf9, 0xea, 0x64, 0xff, 0xb7, 0xfe, 0xbf, 0xbe, 0x8d, 0xc6, 0xbb, 0x62, 0xe1, 0x27, 0xb9, - 0x2b, 0x4e, 0x7e, 0xa0, 0x76, 0xc5, 0xdf, 0x9c, 0x84, 0xb9, 0x8a, 0x17, 0xb9, 0x95, 0xdd, 0x5d, - 0xd7, 0x73, 0xa3, 0x43, 0xf2, 0xf5, 0x09, 0x58, 0xed, 0x06, 0x74, 0x97, 0x06, 0x01, 0x6d, 0xae, - 0xf7, 0x02, 0xd7, 0x6b, 0xd5, 0x1b, 0x7b, 0xb4, 0xd9, 0x6b, 0xbb, 0x5e, 0x6b, 0xa3, 0xe5, 0xf9, - 0xba, 0xf8, 0xea, 0x7d, 0xda, 0xe8, 0xf1, 0x4f, 0x12, 0x8b, 0xa2, 0x33, 0xde, 0x27, 0xd5, 0x46, - 0x23, 0x5a, 0x7d, 0xf1, 0xf8, 0xa8, 0xbc, 0x3a, 0x62, 0x23, 0x1c, 0xf5, 0xd3, 0xc8, 0xd7, 0x26, - 0x60, 0x25, 0xa0, 0x3f, 0xdf, 0x73, 0x87, 0xef, 0x0d, 0x21, 0xb5, 0xda, 0x63, 0x6e, 0x3f, 0x23, - 0xd1, 0xac, 0xbe, 0x70, 0x7c, 0x54, 0x1e, 0xb1, 0x0d, 0x8e, 0xf8, 0x5d, 0x76, 0x0d, 0x66, 0x2b, - 0x5d, 0x37, 0x74, 0xef, 0xa3, 0xdf, 0x8b, 0xe8, 0x10, 0x47, 0xdc, 0x32, 0x14, 0x83, 0x5e, 0x9b, - 
0x8a, 0xb5, 0x5d, 0xaa, 0x96, 0x98, 0x14, 0x42, 0x56, 0x80, 0xa2, 0xdc, 0xfe, 0x45, 0x26, 0x71, - 0x39, 0xca, 0x94, 0x71, 0xe3, 0x2e, 0x14, 0x03, 0x46, 0x44, 0xce, 0xac, 0x71, 0xcf, 0x81, 0x31, - 0xd7, 0x92, 0x09, 0xf6, 0x13, 0x05, 0x09, 0xfb, 0x77, 0x27, 0xe0, 0x42, 0xa5, 0xdb, 0xdd, 0xa2, - 0xe1, 0x5e, 0x8a, 0x8b, 0x5f, 0xb6, 0x60, 0xfe, 0xc0, 0x0d, 0xa2, 0x9e, 0xd3, 0x56, 0xf6, 0x2b, - 0xc1, 0x4f, 0x7d, 0x5c, 0x7e, 0x38, 0xb5, 0xb7, 0x12, 0xa8, 0xab, 0xe4, 0xf8, 0xa8, 0x3c, 0x9f, - 0x2c, 0xc3, 0x14, 0x79, 0xf2, 0xf7, 0x2d, 0x58, 0x94, 0x45, 0x37, 0xfd, 0x26, 0x35, 0xed, 0xa3, - 0xb7, 0xf3, 0xe4, 0x49, 0x23, 0x17, 0x76, 0xad, 0x74, 0x29, 0xf6, 0x31, 0x61, 0xff, 0xf7, 0x09, - 0xb8, 0x38, 0x00, 0x07, 0xf9, 0x0d, 0x0b, 0xce, 0x0b, 0xa3, 0xaa, 0x01, 0x42, 0xba, 0x2b, 0x7b, - 0xf3, 0x67, 0xf2, 0xe6, 0x1c, 0xd9, 0x12, 0xa7, 0x5e, 0x83, 0x56, 0x97, 0x98, 0x34, 0x5c, 0xcb, - 0x20, 0x8d, 0x99, 0x0c, 0x71, 0x4e, 0x85, 0x99, 0x35, 0xc5, 0xe9, 0xc4, 0x63, 0xe1, 0xb4, 0x9e, - 0x41, 0x1a, 0x33, 0x19, 0xb2, 0xff, 0x26, 0x3c, 0x75, 0x02, 0xba, 0x87, 0x2f, 0x4e, 0xfb, 0x6d, - 0x3d, 0xeb, 0x93, 0x73, 0x6e, 0x88, 0x75, 0x6d, 0xc3, 0x14, 0x5f, 0x3a, 0x6a, 0x61, 0x03, 0xdb, - 0xfe, 0xf8, 0x9a, 0x0a, 0x51, 0x42, 0xec, 0xdf, 0xb5, 0x60, 0x66, 0x04, 0x6b, 0x58, 0x39, 0x69, - 0x0d, 0x2b, 0xf5, 0x59, 0xc2, 0xa2, 0x7e, 0x4b, 0xd8, 0xeb, 0xe3, 0x8d, 0xc6, 0x30, 0x16, 0xb0, - 0x1f, 0x5b, 0x70, 0xb6, 0xcf, 0x62, 0x46, 0xf6, 0xe0, 0x7c, 0xd7, 0x6f, 0x2a, 0xb5, 0xe9, 0xba, - 0x13, 0xee, 0x71, 0x98, 0xfc, 0xbc, 0x97, 0xd8, 0x48, 0xd6, 0x32, 0xe0, 0x0f, 0x8e, 0xca, 0x4b, - 0x1a, 0x49, 0xaa, 0x02, 0x66, 0x62, 0x24, 0x5d, 0x98, 0xd9, 0x75, 0x69, 0xbb, 0x19, 0x4f, 0xc1, - 0x31, 0x15, 0xa4, 0x6b, 0x12, 0x9b, 0x30, 0x16, 0xab, 0x7f, 0xa8, 0xa9, 0xd8, 0x5f, 0x82, 0xf9, - 0xe4, 0x95, 0xc4, 0x10, 0x83, 0x77, 0x09, 0x0a, 0x4e, 0xe0, 0xc9, 0xa1, 0x9b, 0x95, 0x15, 0x0a, - 0x15, 0xbc, 0x89, 0xac, 0x9c, 0x3c, 0x0f, 0x33, 0xbb, 0xbd, 0x76, 0x9b, 0x35, 0x90, 0xf6, 0x7f, - 0xad, 0x0e, 0x5f, 0x93, 0xe5, 0xa8, 
0x6b, 0xd8, 0x7f, 0x31, 0x09, 0x0b, 0xd5, 0x76, 0x8f, 0xbe, - 0x1e, 0x50, 0xaa, 0x0e, 0xe9, 0x15, 0x58, 0xe8, 0x06, 0xf4, 0xc0, 0xa5, 0xf7, 0xea, 0xb4, 0x4d, - 0x1b, 0x91, 0x1f, 0x48, 0x6e, 0x2e, 0x4a, 0x44, 0x0b, 0xb5, 0x24, 0x18, 0xd3, 0xf5, 0xc9, 0x6b, - 0x30, 0xef, 0x34, 0x22, 0xf7, 0x80, 0x6a, 0x0c, 0x82, 0xdd, 0x0f, 0x49, 0x0c, 0xf3, 0x95, 0x04, - 0x14, 0x53, 0xb5, 0xc9, 0xe7, 0x61, 0x29, 0x6c, 0x38, 0x6d, 0x7a, 0xbb, 0x2b, 0x49, 0xad, 0xed, - 0xd1, 0xc6, 0x7e, 0xcd, 0x77, 0xbd, 0x48, 0x9a, 0x64, 0x9e, 0x91, 0x98, 0x96, 0xea, 0x03, 0xea, - 0xe1, 0x40, 0x0c, 0xe4, 0x5f, 0x59, 0x70, 0xa9, 0x1b, 0xd0, 0x5a, 0xe0, 0x77, 0x7c, 0xb6, 0xd7, - 0xf6, 0xd9, 0x29, 0xe4, 0x79, 0xfd, 0xad, 0x31, 0x95, 0x0a, 0x51, 0xd2, 0x6f, 0xde, 0xfe, 0xf0, - 0xf1, 0x51, 0xf9, 0x52, 0xed, 0x24, 0x06, 0xf0, 0x64, 0xfe, 0xc8, 0xbf, 0xb6, 0xe0, 0x72, 0xd7, - 0x0f, 0xa3, 0x13, 0x3e, 0xa1, 0x78, 0xaa, 0x9f, 0x60, 0x1f, 0x1f, 0x95, 0x2f, 0xd7, 0x4e, 0xe4, - 0x00, 0x1f, 0xc2, 0xa1, 0x7d, 0x3c, 0x0b, 0x67, 0x8d, 0xb9, 0x27, 0x0f, 0xf1, 0xaf, 0xc2, 0x19, - 0x35, 0x19, 0x62, 0x25, 0xa0, 0x14, 0x1b, 0x5d, 0x2a, 0x26, 0x10, 0x93, 0x75, 0xd9, 0xbc, 0xd3, - 0x53, 0x51, 0xb4, 0x4e, 0xcd, 0xbb, 0x5a, 0x02, 0x8a, 0xa9, 0xda, 0x64, 0x03, 0xce, 0xc9, 0x12, - 0xa4, 0xdd, 0xb6, 0xdb, 0x70, 0xd6, 0xfc, 0x9e, 0x9c, 0x72, 0xc5, 0xea, 0xc5, 0xe3, 0xa3, 0xf2, - 0xb9, 0x5a, 0x3f, 0x18, 0xb3, 0xda, 0x90, 0x4d, 0x38, 0xef, 0xf4, 0x22, 0x5f, 0x7f, 0xff, 0x55, - 0x8f, 0xed, 0x2b, 0x4d, 0x3e, 0xb5, 0x66, 0xc4, 0x06, 0x54, 0xc9, 0x80, 0x63, 0x66, 0x2b, 0x52, - 0x4b, 0x61, 0xab, 0xd3, 0x86, 0xef, 0x35, 0xc5, 0x28, 0x17, 0xe3, 0xa3, 0x48, 0x25, 0xa3, 0x0e, - 0x66, 0xb6, 0x24, 0x6d, 0x98, 0xef, 0x38, 0xf7, 0x6f, 0x7b, 0xce, 0x81, 0xe3, 0xb6, 0x19, 0x11, - 0x69, 0xc8, 0x19, 0x6c, 0x5d, 0xe8, 0x45, 0x6e, 0x7b, 0x45, 0x5c, 0x71, 0xaf, 0x6c, 0x78, 0xd1, - 0x9b, 0x41, 0x3d, 0x62, 0x2a, 0xab, 0x50, 0xa5, 0xb6, 0x12, 0xb8, 0x30, 0x85, 0x9b, 0xbc, 0x09, - 0x17, 0xf8, 0x72, 0x5c, 0xf7, 0xef, 0x79, 0xeb, 0xb4, 0xed, 0x1c, 0xaa, 
0x0f, 0x98, 0xe6, 0x1f, - 0xf0, 0xe4, 0xf1, 0x51, 0xf9, 0x42, 0x3d, 0xab, 0x02, 0x66, 0xb7, 0x23, 0x0e, 0x3c, 0x95, 0x04, - 0x20, 0x3d, 0x70, 0x43, 0xd7, 0xf7, 0x84, 0x39, 0x66, 0x26, 0x36, 0xc7, 0xd4, 0x07, 0x57, 0xc3, - 0x93, 0x70, 0x90, 0x7f, 0x68, 0xc1, 0xf9, 0xac, 0x65, 0xb8, 0x54, 0xca, 0xe3, 0xa2, 0x2d, 0xb5, - 0xb4, 0xc4, 0x8c, 0xc8, 0x14, 0x0a, 0x99, 0x4c, 0x90, 0xf7, 0x2c, 0x98, 0x73, 0x8c, 0xa3, 0xe4, - 0x12, 0x70, 0xae, 0x6e, 0x8c, 0x6b, 0xd0, 0x88, 0x31, 0x56, 0x17, 0x8f, 0x8f, 0xca, 0x89, 0xe3, - 0x2a, 0x26, 0x28, 0x92, 0x7f, 0x6c, 0xc1, 0x85, 0xcc, 0x35, 0xbe, 0x34, 0x7b, 0x1a, 0x3d, 0xc4, - 0x27, 0x49, 0xb6, 0xcc, 0xc9, 0x66, 0x83, 0x7c, 0xd3, 0xd2, 0x5b, 0x99, 0xba, 0x7b, 0x59, 0x9a, - 0xe3, 0xac, 0xdd, 0x1a, 0xf3, 0xf4, 0x1c, 0xab, 0x0f, 0x0a, 0x71, 0xf5, 0x9c, 0xb1, 0x33, 0xaa, - 0x42, 0x4c, 0x93, 0x27, 0xdf, 0xb0, 0xd4, 0xd6, 0xa8, 0x39, 0x3a, 0x73, 0x5a, 0x1c, 0x91, 0x78, - 0xa7, 0xd5, 0x0c, 0xa5, 0x88, 0x93, 0x9f, 0x85, 0x65, 0x67, 0xc7, 0x0f, 0xa2, 0xcc, 0xc5, 0xb7, - 0x34, 0xcf, 0x97, 0xd1, 0xe5, 0xe3, 0xa3, 0xf2, 0x72, 0x65, 0x60, 0x2d, 0x3c, 0x01, 0x83, 0xfd, - 0xdb, 0x45, 0x98, 0x13, 0x47, 0x02, 0xb9, 0x75, 0xfd, 0x8e, 0x05, 0x4f, 0x37, 0x7a, 0x41, 0x40, - 0xbd, 0xa8, 0x1e, 0xd1, 0x6e, 0xff, 0xc6, 0x65, 0x9d, 0xea, 0xc6, 0xf5, 0xcc, 0xf1, 0x51, 0xf9, - 0xe9, 0xb5, 0x13, 0xe8, 0xe3, 0x89, 0xdc, 0x91, 0x7f, 0x67, 0x81, 0x2d, 0x2b, 0x54, 0x9d, 0xc6, - 0x7e, 0x2b, 0xf0, 0x7b, 0x5e, 0xb3, 0xff, 0x23, 0x26, 0x4e, 0xf5, 0x23, 0x9e, 0x3b, 0x3e, 0x2a, - 0xdb, 0x6b, 0x0f, 0xe5, 0x02, 0x87, 0xe0, 0x94, 0xbc, 0x0e, 0x67, 0x65, 0xad, 0xab, 0xf7, 0xbb, - 0x34, 0x70, 0x99, 0xf2, 0x2d, 0x15, 0xc7, 0xd8, 0x6d, 0x27, 0x5d, 0x01, 0xfb, 0xdb, 0x90, 0x10, - 0xa6, 0xef, 0x51, 0xb7, 0xb5, 0x17, 0x29, 0xf5, 0x69, 0x4c, 0x5f, 0x1d, 0x69, 0x1e, 0xb8, 0x23, - 0x70, 0x56, 0x67, 0x8f, 0x8f, 0xca, 0xd3, 0xf2, 0x0f, 0x2a, 0x4a, 0xe4, 0x26, 0xcc, 0x8b, 0x03, - 0x5b, 0xcd, 0xf5, 0x5a, 0x35, 0xdf, 0x13, 0x0e, 0x27, 0xa5, 0xea, 0x73, 0x6a, 0xc3, 0xaf, 0x27, - 0xa0, 0x0f, 
0x8e, 0xca, 0x73, 0xea, 0xf7, 0xf6, 0x61, 0x97, 0x62, 0xaa, 0xb5, 0xfd, 0xfb, 0x53, - 0x00, 0x6a, 0xba, 0xd2, 0x2e, 0xf9, 0x28, 0x94, 0x42, 0x1a, 0x09, 0xaa, 0xf2, 0x06, 0x41, 0x5c, - 0xcc, 0xa8, 0x42, 0x8c, 0xe1, 0x64, 0x1f, 0x8a, 0x5d, 0xa7, 0x17, 0x52, 0x39, 0xf8, 0x37, 0x72, - 0x19, 0xfc, 0x1a, 0xc3, 0x28, 0x4e, 0x68, 0xfc, 0x27, 0x0a, 0x1a, 0xe4, 0x2b, 0x16, 0x00, 0x4d, - 0x0e, 0xd8, 0xd8, 0x96, 0x12, 0x49, 0x32, 0x1e, 0x53, 0xd6, 0x07, 0xd5, 0xf9, 0xe3, 0xa3, 0x32, - 0x18, 0x43, 0x6f, 0x90, 0x25, 0xf7, 0x60, 0xc6, 0x51, 0x32, 0x7f, 0xf2, 0x34, 0x64, 0x3e, 0x3f, - 0x38, 0xe9, 0x49, 0xab, 0x89, 0x91, 0xaf, 0x59, 0x30, 0x1f, 0xd2, 0x48, 0x0e, 0x15, 0x93, 0x3c, - 0x52, 0xe1, 0x1d, 0x73, 0xd2, 0xd5, 0x13, 0x38, 0x85, 0x04, 0x4d, 0x96, 0x61, 0x8a, 0xae, 0x62, - 0xe5, 0x3a, 0x75, 0x9a, 0x34, 0xe0, 0xe7, 0x72, 0xa9, 0x49, 0x8d, 0xcf, 0x8a, 0x81, 0x53, 0xb3, - 0x62, 0x94, 0x61, 0x8a, 0xae, 0x62, 0x65, 0xcb, 0x0d, 0x02, 0x5f, 0xb2, 0x32, 0x93, 0x13, 0x2b, - 0x06, 0x4e, 0xcd, 0x8a, 0x51, 0x86, 0x29, 0xba, 0xf6, 0xb7, 0xcf, 0xc0, 0xbc, 0x5a, 0x48, 0xb1, - 0x66, 0x2f, 0xcc, 0x40, 0x03, 0x34, 0xfb, 0x35, 0x13, 0x88, 0xc9, 0xba, 0xac, 0xb1, 0x58, 0xaa, - 0x49, 0xc5, 0x5e, 0x37, 0xae, 0x9b, 0x40, 0x4c, 0xd6, 0x25, 0x1d, 0x28, 0x86, 0x11, 0xed, 0xaa, - 0xcb, 0xe0, 0x31, 0xef, 0x2a, 0x63, 0xf9, 0x10, 0x5f, 0xf7, 0xb0, 0x7f, 0x21, 0x0a, 0x2a, 0xdc, - 0x92, 0x19, 0x25, 0x8c, 0x9b, 0x72, 0x71, 0xe4, 0xb3, 0x3e, 0x93, 0x76, 0x53, 0x31, 0x1a, 0xc9, - 0x32, 0x4c, 0x91, 0xcf, 0x50, 0xf6, 0x8b, 0xa7, 0xa8, 0xec, 0x7f, 0x16, 0x66, 0x3a, 0xce, 0xfd, - 0x7a, 0x2f, 0x68, 0x3d, 0xfa, 0xa1, 0x42, 0xba, 0xd7, 0x09, 0x2c, 0xa8, 0xf1, 0x91, 0x2f, 0x5b, - 0x86, 0xc8, 0x99, 0xe6, 0xc8, 0xef, 0xe4, 0x2b, 0x72, 0xf4, 0x5e, 0x39, 0x50, 0xf8, 0xf4, 0xa9, - 0xde, 0x33, 0x8f, 0x5d, 0xf5, 0x66, 0x6a, 0xa4, 0x58, 0x20, 0x5a, 0x8d, 0x2c, 0x9d, 0xaa, 0x1a, - 0xb9, 0x96, 0x20, 0x86, 0x29, 0xe2, 0x9c, 0x1f, 0xb1, 0xe6, 0x34, 0x3f, 0x70, 0xaa, 0xfc, 0xd4, - 0x13, 0xc4, 0x30, 0x45, 0x7c, 0xf0, 0x79, 0x73, 
0xf6, 0x74, 0xce, 0x9b, 0x73, 0x39, 0x9c, 0x37, - 0x4f, 0x56, 0xc5, 0xcf, 0x8c, 0xab, 0x8a, 0x93, 0x1b, 0x40, 0x9a, 0x87, 0x9e, 0xd3, 0x71, 0x1b, - 0x52, 0x58, 0xf2, 0x6d, 0x73, 0x9e, 0xdb, 0x23, 0x96, 0xa5, 0x20, 0x23, 0xeb, 0x7d, 0x35, 0x30, - 0xa3, 0x15, 0x89, 0x60, 0xa6, 0xab, 0x34, 0xae, 0x85, 0x3c, 0x66, 0xbf, 0xd2, 0xc0, 0x84, 0xbf, - 0x00, 0x5b, 0x78, 0xaa, 0x04, 0x35, 0x25, 0xb2, 0x09, 0xe7, 0x3b, 0xae, 0x57, 0xf3, 0x9b, 0x61, - 0x8d, 0x06, 0xd2, 0xda, 0x52, 0xa7, 0xd1, 0xd2, 0x22, 0xef, 0x1b, 0x7e, 0x82, 0xde, 0xca, 0x80, - 0x63, 0x66, 0x2b, 0xfb, 0x7f, 0x5a, 0xb0, 0xb8, 0xd6, 0xf6, 0x7b, 0xcd, 0x3b, 0x4e, 0xd4, 0xd8, - 0x13, 0x57, 0xe5, 0xe4, 0x35, 0x98, 0x71, 0xbd, 0x88, 0x06, 0x07, 0x4e, 0x5b, 0xee, 0x4f, 0xb6, - 0x32, 0x9f, 0x6e, 0xc8, 0xf2, 0x07, 0x47, 0xe5, 0xf9, 0xf5, 0x5e, 0xc0, 0xdd, 0xe9, 0x84, 0xb4, - 0x42, 0xdd, 0x86, 0x7c, 0xdb, 0x82, 0xb3, 0xe2, 0xb2, 0x7d, 0xdd, 0x89, 0x9c, 0x5b, 0x3d, 0x1a, - 0xb8, 0x54, 0x5d, 0xb7, 0x8f, 0x29, 0xa8, 0xd2, 0xbc, 0x2a, 0x02, 0x87, 0xb1, 0xa2, 0xbe, 0x95, - 0xa6, 0x8c, 0xfd, 0xcc, 0xd8, 0xbf, 0x52, 0x80, 0x27, 0x07, 0xe2, 0x22, 0xcb, 0x30, 0xe1, 0x36, - 0xe5, 0xa7, 0x83, 0xc4, 0x3b, 0xb1, 0xd1, 0xc4, 0x09, 0xb7, 0x49, 0x56, 0xb8, 0xce, 0x19, 0xd0, - 0x30, 0x54, 0x37, 0xaf, 0x25, 0xad, 0x1e, 0xca, 0x52, 0x34, 0x6a, 0x90, 0x32, 0x14, 0xb9, 0x6b, - 0xa5, 0x3c, 0x4f, 0x70, 0x2d, 0x96, 0x7b, 0x31, 0xa2, 0x28, 0x27, 0xbf, 0x68, 0x01, 0x08, 0x06, - 0xd9, 0x69, 0x44, 0xee, 0x92, 0x98, 0x6f, 0x37, 0x31, 0xcc, 0x82, 0xcb, 0xf8, 0x3f, 0x1a, 0x54, - 0xc9, 0x36, 0x4c, 0x31, 0x85, 0xd6, 0x6f, 0x3e, 0xf2, 0xa6, 0xc8, 0xaf, 0x64, 0x6a, 0x1c, 0x07, - 0x4a, 0x5c, 0xac, 0xaf, 0x02, 0x1a, 0xf5, 0x02, 0x8f, 0x75, 0x2d, 0xdf, 0x06, 0x67, 0x04, 0x17, - 0xa8, 0x4b, 0xd1, 0xa8, 0x61, 0xff, 0xcb, 0x09, 0x38, 0x9f, 0xc5, 0x3a, 0xdb, 0x6d, 0xa6, 0x04, - 0xb7, 0xf2, 0x68, 0xfc, 0x99, 0xfc, 0xfb, 0x47, 0xfa, 0x8d, 0x68, 0xef, 0x0a, 0xe9, 0xd9, 0x26, - 0xe9, 0x92, 0xcf, 0xe8, 0x1e, 0x9a, 0x78, 0xc4, 0x1e, 0xd2, 0x98, 0x53, 0xbd, 0xf4, 
0x0c, 0x4c, - 0x86, 0x6c, 0xe4, 0x0b, 0xc9, 0xeb, 0x0e, 0x3e, 0x46, 0x1c, 0xc2, 0x6a, 0xf4, 0x3c, 0x37, 0x92, - 0xf1, 0x08, 0xba, 0xc6, 0x6d, 0xcf, 0x8d, 0x90, 0x43, 0xec, 0x6f, 0x4d, 0xc0, 0xf2, 0xe0, 0x8f, - 0x22, 0xdf, 0xb2, 0x00, 0x9a, 0xec, 0xb8, 0x12, 0x72, 0xa7, 0x5e, 0xe1, 0x67, 0xe3, 0x9c, 0x56, - 0x1f, 0xae, 0x2b, 0x4a, 0xb1, 0xd3, 0x95, 0x2e, 0x0a, 0xd1, 0x60, 0x84, 0xbc, 0xa0, 0xa6, 0x3e, - 0xbf, 0xaa, 0x11, 0x8b, 0x49, 0xb7, 0xd9, 0xd2, 0x10, 0x34, 0x6a, 0xb1, 0xf3, 0xa8, 0xe7, 0x74, - 0x68, 0xd8, 0x75, 0x74, 0x74, 0x07, 0x3f, 0x8f, 0xde, 0x54, 0x85, 0x18, 0xc3, 0xed, 0x36, 0x3c, - 0x3b, 0x04, 0x9f, 0x39, 0x39, 0xcf, 0xdb, 0xff, 0xc3, 0x82, 0x8b, 0x6b, 0xed, 0x5e, 0x18, 0xd1, - 0xe0, 0xff, 0x1b, 0x1f, 0xb6, 0xff, 0x65, 0xc1, 0x53, 0x03, 0xbe, 0xf9, 0x31, 0xb8, 0xb2, 0xbd, - 0x93, 0x74, 0x65, 0xbb, 0x3d, 0xee, 0x94, 0xce, 0xfc, 0x8e, 0x01, 0x1e, 0x6d, 0xbf, 0x69, 0xc1, - 0x19, 0x26, 0xb6, 0x9a, 0x7e, 0x2b, 0xa7, 0x8d, 0xf3, 0x59, 0x28, 0xfe, 0x3c, 0xdb, 0x80, 0xd2, - 0x93, 0x8c, 0xef, 0x4a, 0x28, 0x60, 0x6c, 0xcd, 0x38, 0x5d, 0xf7, 0x2d, 0x1a, 0xf0, 0x0d, 0xa8, - 0x90, 0x5c, 0x33, 0x15, 0x0d, 0x41, 0xa3, 0x96, 0xfd, 0x69, 0x90, 0xce, 0x62, 0xa9, 0x15, 0x67, - 0x0d, 0xb3, 0xe2, 0xec, 0xff, 0x30, 0x01, 0x86, 0xf1, 0xe3, 0x31, 0xcc, 0x64, 0x2f, 0x31, 0x93, - 0xc7, 0x3c, 0xb8, 0x1b, 0xa6, 0x9c, 0x41, 0xc1, 0x3c, 0x07, 0xa9, 0x60, 0x9e, 0x9b, 0xb9, 0x51, - 0x3c, 0x39, 0x96, 0xe7, 0x07, 0x16, 0x3c, 0x15, 0x57, 0xee, 0xb7, 0x4b, 0x3e, 0x5c, 0x2c, 0xbd, - 0x0c, 0xb3, 0x4e, 0xdc, 0x4c, 0xce, 0x1b, 0x23, 0x92, 0x42, 0x83, 0xd0, 0xac, 0x17, 0xfb, 0xa0, - 0x17, 0x1e, 0xd1, 0x07, 0x7d, 0xf2, 0x64, 0x1f, 0x74, 0xfb, 0xcf, 0x27, 0xe0, 0x52, 0xff, 0x97, - 0xa9, 0x05, 0x35, 0xdc, 0x25, 0xff, 0x2b, 0x30, 0x17, 0xc9, 0x06, 0xc6, 0xf6, 0xa0, 0xa3, 0x3a, - 0xb7, 0x0d, 0x18, 0x26, 0x6a, 0xb2, 0x96, 0x0d, 0xb1, 0x94, 0xeb, 0x0d, 0xbf, 0xab, 0x22, 0x18, - 0x74, 0xcb, 0x35, 0x03, 0x86, 0x89, 0x9a, 0xda, 0x37, 0x74, 0xf2, 0xd4, 0x7d, 0x43, 0xeb, 0x70, - 0x41, 0x79, 0xc3, 0x5d, 
0xf3, 0x83, 0x35, 0xbf, 0xd3, 0x6d, 0x53, 0x19, 0xc3, 0xc0, 0x98, 0xbd, - 0x24, 0x9b, 0x5c, 0xc0, 0xac, 0x4a, 0x98, 0xdd, 0xd6, 0xfe, 0x41, 0x01, 0xce, 0xc5, 0xdd, 0xbe, - 0xe6, 0x7b, 0x4d, 0x97, 0xfb, 0x14, 0xbe, 0x0a, 0x93, 0xd1, 0x61, 0x57, 0x75, 0xf6, 0x5f, 0x55, - 0xec, 0x6c, 0x1f, 0x76, 0xd9, 0x68, 0x5f, 0xcc, 0x68, 0xc2, 0x2d, 0xc3, 0xbc, 0x11, 0xd9, 0xd4, - 0xab, 0x43, 0x8c, 0xc0, 0x4b, 0xc9, 0xd9, 0xfc, 0xe0, 0xa8, 0x9c, 0x11, 0xd4, 0xbc, 0xa2, 0x31, - 0x25, 0xe7, 0x3c, 0xb9, 0x0b, 0xf3, 0x6d, 0x27, 0x8c, 0x6e, 0x77, 0x9b, 0x4e, 0x44, 0xb7, 0x5d, - 0xe9, 0xa1, 0x31, 0x5a, 0x60, 0x80, 0xbe, 0xca, 0xde, 0x4c, 0x60, 0xc2, 0x14, 0x66, 0x72, 0x00, - 0x84, 0x95, 0x6c, 0x07, 0x8e, 0x17, 0x8a, 0xaf, 0x62, 0xf4, 0x46, 0x0f, 0x44, 0xd0, 0x27, 0xc3, - 0xcd, 0x3e, 0x6c, 0x98, 0x41, 0x81, 0x3c, 0x07, 0x53, 0x01, 0x75, 0x42, 0x39, 0x98, 0xa5, 0x78, - 0xfd, 0x23, 0x2f, 0x45, 0x09, 0x35, 0x17, 0xd4, 0xd4, 0x43, 0x16, 0xd4, 0x9f, 0x5a, 0x30, 0x1f, - 0x0f, 0xd3, 0x63, 0xd8, 0x59, 0x3b, 0xc9, 0x9d, 0xf5, 0x7a, 0x5e, 0x22, 0x71, 0xc0, 0x66, 0xfa, - 0x07, 0x53, 0xe6, 0xf7, 0x71, 0xc7, 0xf0, 0x2f, 0x42, 0x49, 0xad, 0x6a, 0xa5, 0xb2, 0x8e, 0x79, - 0xc0, 0x4e, 0x28, 0x33, 0x46, 0x40, 0x93, 0x24, 0x82, 0x31, 0x3d, 0xb6, 0x95, 0x37, 0xe5, 0x36, - 0x2d, 0xa7, 0xbd, 0xde, 0xca, 0xd5, 0xf6, 0x9d, 0xb5, 0x95, 0xab, 0x36, 0xe4, 0x36, 0x5c, 0xec, - 0x06, 0x3e, 0x0f, 0xab, 0x5d, 0xa7, 0x4e, 0xb3, 0xed, 0x7a, 0x54, 0x59, 0x31, 0x84, 0x27, 0xc5, - 0x53, 0xc7, 0x47, 0xe5, 0x8b, 0xb5, 0xec, 0x2a, 0x38, 0xa8, 0x6d, 0x32, 0x30, 0x6b, 0x72, 0x88, - 0xc0, 0xac, 0xbf, 0xa3, 0x6d, 0x85, 0x34, 0x94, 0xe1, 0x51, 0x9f, 0xcb, 0x6b, 0x28, 0x33, 0xc4, - 0x7a, 0x3c, 0xa5, 0x2a, 0x92, 0x28, 0x6a, 0xf2, 0x83, 0x0d, 0x52, 0x53, 0x8f, 0x68, 0x90, 0x8a, - 0xfd, 0xeb, 0xa7, 0x7f, 0x92, 0xfe, 0xf5, 0x33, 0x1f, 0x28, 0xff, 0xfa, 0xf7, 0x8b, 0xb0, 0x98, - 0xd6, 0x40, 0x4e, 0x3f, 0xe8, 0xec, 0xef, 0x59, 0xb0, 0xa8, 0x56, 0x8f, 0xa0, 0x49, 0xd5, 0x55, - 0xc3, 0x66, 0x4e, 0x8b, 0x56, 0xe8, 0x52, 0x3a, 0x1a, 0x7f, 
0x3b, 0x45, 0x0d, 0xfb, 0xe8, 0x93, - 0xb7, 0x61, 0x56, 0x5b, 0xe4, 0x1f, 0x29, 0x02, 0x6d, 0x81, 0x6b, 0x51, 0x31, 0x0a, 0x34, 0xf1, - 0x91, 0xf7, 0x2d, 0x80, 0x86, 0xda, 0xe6, 0xd4, 0xea, 0xba, 0x95, 0xd7, 0xea, 0xd2, 0x1b, 0x68, - 0xac, 0x2c, 0xeb, 0xa2, 0x10, 0x0d, 0xc2, 0xe4, 0x57, 0xb8, 0x2d, 0x5e, 0x6b, 0x77, 0x22, 0xca, - 0x7f, 0x6c, 0xdf, 0xe1, 0x13, 0x14, 0xd3, 0x58, 0x95, 0x32, 0x40, 0x21, 0x26, 0x98, 0xb0, 0x5f, - 0x05, 0xed, 0xed, 0xc9, 0xc4, 0x16, 0xf7, 0xf7, 0xac, 0x39, 0xd1, 0x9e, 0x9c, 0x82, 0x5a, 0x6c, - 0x5d, 0x53, 0x00, 0x8c, 0xeb, 0xd8, 0x5f, 0x80, 0xf9, 0xd7, 0x03, 0xa7, 0xbb, 0xe7, 0x72, 0x9b, - 0x37, 0x3b, 0x5b, 0x7d, 0x04, 0xa6, 0x9d, 0x66, 0x33, 0x2b, 0x71, 0x44, 0x45, 0x14, 0xa3, 0x82, - 0x0f, 0x75, 0x8c, 0xb2, 0x7f, 0xdf, 0x02, 0x12, 0xdf, 0x1b, 0xba, 0x5e, 0x6b, 0xcb, 0x89, 0x1a, - 0x7b, 0xec, 0x7c, 0xb4, 0xc7, 0x4b, 0xb3, 0xce, 0x47, 0xd7, 0x35, 0x04, 0x8d, 0x5a, 0xe4, 0x5d, - 0x98, 0x15, 0xff, 0xde, 0xd2, 0x16, 0x82, 0xb1, 0x23, 0x08, 0xc4, 0x86, 0xc2, 0x79, 0x12, 0xb3, - 0xf0, 0x7a, 0x4c, 0x01, 0x4d, 0x72, 0xac, 0xab, 0x36, 0xbc, 0xdd, 0x76, 0xef, 0x7e, 0x73, 0x27, - 0xee, 0xaa, 0x6e, 0xe0, 0xef, 0xba, 0x6d, 0x9a, 0xee, 0xaa, 0x9a, 0x28, 0x46, 0x05, 0x1f, 0xae, - 0xab, 0xfe, 0x8d, 0x05, 0xe7, 0x37, 0xc2, 0xc8, 0xf5, 0xd7, 0x69, 0x18, 0xb1, 0x6d, 0x85, 0x09, - 0x9f, 0x5e, 0x7b, 0x18, 0xc7, 0xed, 0x75, 0x58, 0x94, 0x77, 0x98, 0xbd, 0x9d, 0x90, 0x46, 0x86, - 0x1e, 0xaf, 0xd7, 0xf1, 0x5a, 0x0a, 0x8e, 0x7d, 0x2d, 0x18, 0x16, 0x79, 0x99, 0x19, 0x63, 0x29, - 0x24, 0xb1, 0xd4, 0x53, 0x70, 0xec, 0x6b, 0x61, 0x7f, 0xbf, 0x00, 0xe7, 0xf8, 0x67, 0xa4, 0x82, - 0x2e, 0xbe, 0x31, 0x28, 0xe8, 0x62, 0xcc, 0xa5, 0xcc, 0x69, 0x3d, 0x42, 0xc8, 0xc5, 0xdf, 0xb5, - 0x60, 0xa1, 0x99, 0xec, 0xe9, 0x7c, 0x6c, 0x3a, 0x59, 0x63, 0x28, 0x5c, 0xb6, 0x52, 0x85, 0x98, - 0xa6, 0x4f, 0x7e, 0xd5, 0x82, 0x85, 0x24, 0x9b, 0x4a, 0xba, 0x9f, 0x42, 0x27, 0x69, 0x1f, 0xeb, - 0x64, 0x79, 0x88, 0x69, 0x16, 0xec, 0x3f, 0x9c, 0x90, 0x43, 0x7a, 0x1a, 0x11, 0x05, 0xe4, 0x1e, - 
0x94, 0xa2, 0x76, 0x28, 0x0a, 0xe5, 0xd7, 0x8e, 0x79, 0x22, 0xdc, 0xde, 0xac, 0x0b, 0xf7, 0x81, - 0x58, 0x69, 0x93, 0x25, 0x4c, 0xf9, 0x54, 0xb4, 0x38, 0xe1, 0x46, 0x57, 0x12, 0xce, 0xe5, 0x28, - 0xba, 0xbd, 0x56, 0x4b, 0x13, 0x96, 0x25, 0x8c, 0xb0, 0xa2, 0x65, 0xff, 0x96, 0x05, 0xa5, 0x1b, - 0xbe, 0x92, 0x23, 0x3f, 0x9b, 0x83, 0xa1, 0x47, 0xeb, 0x83, 0xfa, 0x9a, 0x32, 0x3e, 0x62, 0xbc, - 0x96, 0x30, 0xf3, 0x3c, 0x6d, 0xe0, 0x5e, 0xe1, 0xf9, 0xb3, 0x18, 0xaa, 0x1b, 0xfe, 0xce, 0x40, - 0xd3, 0xe3, 0xaf, 0x15, 0xe1, 0xcc, 0x1b, 0xce, 0x21, 0xf5, 0x22, 0x67, 0xf4, 0x4d, 0xe2, 0x65, - 0x98, 0x75, 0xba, 0xfc, 0x1e, 0xcc, 0xd0, 0xf1, 0x63, 0xcb, 0x49, 0x0c, 0x42, 0xb3, 0x5e, 0x2c, - 0xd0, 0x44, 0x76, 0x9d, 0x2c, 0x51, 0xb4, 0x96, 0x82, 0x63, 0x5f, 0x0b, 0x72, 0x03, 0x88, 0x8c, - 0x46, 0xad, 0x34, 0x1a, 0x7e, 0xcf, 0x13, 0x22, 0x4d, 0x18, 0x55, 0xf4, 0x61, 0x73, 0xab, 0xaf, - 0x06, 0x66, 0xb4, 0x22, 0x9f, 0x87, 0xa5, 0x06, 0xc7, 0x2c, 0x8f, 0x1e, 0x26, 0x46, 0x71, 0xfc, - 0xd4, 0x71, 0x02, 0x6b, 0x03, 0xea, 0xe1, 0x40, 0x0c, 0x8c, 0xd3, 0x30, 0xf2, 0x03, 0xa7, 0x45, - 0x4d, 0xbc, 0x53, 0x49, 0x4e, 0xeb, 0x7d, 0x35, 0x30, 0xa3, 0x15, 0xf9, 0x12, 0x94, 0xa2, 0xbd, - 0x80, 0x86, 0x7b, 0x7e, 0xbb, 0x29, 0xfd, 0x16, 0xc6, 0xb4, 0xb4, 0xc9, 0xd1, 0xdf, 0x56, 0x58, - 0x8d, 0xe9, 0xad, 0x8a, 0x30, 0xa6, 0x49, 0x02, 0x98, 0x0a, 0x1b, 0x7e, 0x97, 0x86, 0x52, 0x65, - 0xbf, 0x91, 0x0b, 0x75, 0x6e, 0x39, 0x32, 0x6c, 0x7c, 0x9c, 0x02, 0x4a, 0x4a, 0xf6, 0xef, 0x4d, - 0xc0, 0x9c, 0x59, 0x71, 0x08, 0xd9, 0xf4, 0x15, 0x0b, 0xe6, 0x1a, 0xbe, 0x17, 0x05, 0x7e, 0x5b, - 0xd8, 0xaf, 0xf2, 0xd1, 0x28, 0x18, 0xaa, 0x75, 0x1a, 0x39, 0x6e, 0xdb, 0x30, 0x85, 0x19, 0x64, - 0x30, 0x41, 0x94, 0x7c, 0xdd, 0x82, 0x85, 0xd8, 0xcd, 0x2d, 0x36, 0xa4, 0xe5, 0xca, 0x88, 0x16, - 0xf5, 0x57, 0x93, 0x94, 0x30, 0x4d, 0xda, 0xde, 0x81, 0xc5, 0xf4, 0x68, 0xb3, 0xae, 0xec, 0x3a, - 0x72, 0xad, 0x17, 0xe2, 0xae, 0xac, 0x39, 0x61, 0x88, 0x1c, 0x42, 0x9e, 0x87, 0x99, 0x8e, 0x13, - 0xb4, 0x5c, 0xcf, 0x69, 0xf3, 0x5e, 
0x2c, 0x18, 0x02, 0x49, 0x96, 0xa3, 0xae, 0x61, 0x7f, 0x1c, - 0xe6, 0xb6, 0x1c, 0xaf, 0x45, 0x9b, 0x52, 0x0e, 0x3f, 0x3c, 0xa6, 0xed, 0x47, 0x93, 0x30, 0x6b, - 0x9c, 0xcd, 0x4e, 0xff, 0x9c, 0x95, 0x48, 0xa9, 0x51, 0xc8, 0x31, 0xa5, 0xc6, 0x67, 0x01, 0x76, - 0x5d, 0xcf, 0x0d, 0xf7, 0x1e, 0x31, 0x59, 0x07, 0xbf, 0xd7, 0xbd, 0xa6, 0x31, 0xa0, 0x81, 0x2d, - 0xbe, 0x3c, 0x2b, 0x9e, 0x90, 0x79, 0xea, 0x7d, 0xcb, 0xd8, 0x6e, 0xa6, 0xf2, 0x70, 0x16, 0x30, - 0x06, 0x66, 0x45, 0x6d, 0x3f, 0x22, 0xd7, 0xd0, 0x49, 0xbb, 0xd2, 0x36, 0xcc, 0x04, 0x34, 0xec, - 0x75, 0xd8, 0x89, 0x71, 0x7a, 0xe4, 0x6e, 0xe0, 0x6e, 0x1b, 0x28, 0xdb, 0xa3, 0xc6, 0xb4, 0xfc, - 0x2a, 0x9c, 0x49, 0xb0, 0x30, 0x52, 0xf6, 0x21, 0x1f, 0x32, 0x0d, 0x00, 0x8f, 0x72, 0x99, 0xc3, - 0xc6, 0xa2, 0x6d, 0x64, 0xeb, 0xd0, 0x63, 0x21, 0x9c, 0x73, 0x04, 0xcc, 0xfe, 0xf3, 0x29, 0x90, - 0xf7, 0xdf, 0x43, 0x88, 0x2b, 0xf3, 0xd6, 0x6b, 0xe2, 0x11, 0x6e, 0xbd, 0x6e, 0xc0, 0x9c, 0xeb, - 0xb9, 0x91, 0xeb, 0xb4, 0xb9, 0x71, 0x47, 0x6e, 0xa7, 0xca, 0x7b, 0x79, 0x6e, 0xc3, 0x80, 0x65, - 0xe0, 0x49, 0xb4, 0x25, 0xb7, 0xa0, 0xc8, 0xf7, 0x1b, 0x39, 0x81, 0x47, 0xbf, 0xa4, 0xe7, 0xfe, - 0x19, 0x22, 0xa4, 0x49, 0x60, 0xe2, 0x87, 0x0f, 0x91, 0xae, 0x44, 0x1f, 0xbf, 0xe5, 0x3c, 0x8e, - 0x0f, 0x1f, 0x29, 0x38, 0xf6, 0xb5, 0x60, 0x58, 0x76, 0x1d, 0xb7, 0xdd, 0x0b, 0x68, 0x8c, 0x65, - 0x2a, 0x89, 0xe5, 0x5a, 0x0a, 0x8e, 0x7d, 0x2d, 0xc8, 0x2e, 0xcc, 0xc9, 0x32, 0xe1, 0x72, 0x35, - 0xfd, 0x88, 0x5f, 0xc9, 0x5d, 0xeb, 0xae, 0x19, 0x98, 0x30, 0x81, 0x97, 0xf4, 0xe0, 0xac, 0xeb, - 0x35, 0x7c, 0xaf, 0xd1, 0xee, 0x85, 0xee, 0x01, 0x8d, 0xe3, 0x89, 0x1e, 0x85, 0xd8, 0x85, 0xe3, - 0xa3, 0xf2, 0xd9, 0x8d, 0x34, 0x3a, 0xec, 0xa7, 0x40, 0xbe, 0x6c, 0xc1, 0x85, 0x86, 0xef, 0x85, - 0x3c, 0xfe, 0xff, 0x80, 0x5e, 0x0d, 0x02, 0x3f, 0x10, 0xb4, 0x4b, 0x8f, 0x48, 0x9b, 0xdb, 0x14, - 0xd7, 0xb2, 0x50, 0x62, 0x36, 0x25, 0xf2, 0x0e, 0xcc, 0x74, 0x03, 0xff, 0xc0, 0x6d, 0xd2, 0x40, - 0xba, 0xef, 0x6d, 0xe6, 0x91, 0x8f, 0xa4, 0x26, 0x71, 0xc6, 0xa2, 0x47, 
0x95, 0xa0, 0xa6, 0x67, - 0xff, 0xef, 0x59, 0x98, 0x4f, 0x56, 0x27, 0xbf, 0x00, 0xd0, 0x0d, 0xfc, 0x0e, 0x8d, 0xf6, 0xa8, - 0x8e, 0x0b, 0xb9, 0x39, 0x6e, 0xda, 0x0b, 0x85, 0x4f, 0xb9, 0xbc, 0x30, 0x71, 0x11, 0x97, 0xa2, - 0x41, 0x91, 0x04, 0x30, 0xbd, 0x2f, 0xb6, 0x5d, 0xa9, 0x85, 0xbc, 0x91, 0x8b, 0xce, 0x24, 0x29, - 0xf3, 0x80, 0x06, 0x59, 0x84, 0x8a, 0x10, 0xd9, 0x81, 0xc2, 0x3d, 0xba, 0x93, 0x4f, 0xcc, 0xf5, - 0x1d, 0x2a, 0x4f, 0x33, 0xd5, 0xe9, 0xe3, 0xa3, 0x72, 0xe1, 0x0e, 0xdd, 0x41, 0x86, 0x9c, 0x7d, - 0x57, 0x53, 0xdc, 0xdd, 0x4b, 0x51, 0x31, 0xe6, 0x77, 0x25, 0x1c, 0x01, 0xc4, 0x77, 0xc9, 0x22, - 0x54, 0x84, 0xc8, 0x3b, 0x50, 0xba, 0xe7, 0x1c, 0xd0, 0xdd, 0xc0, 0xf7, 0x22, 0xe9, 0x67, 0x35, - 0x66, 0xa8, 0xc0, 0x1d, 0x85, 0x4e, 0xd2, 0xe5, 0xdb, 0xbb, 0x2e, 0xc4, 0x98, 0x1c, 0x39, 0x80, - 0x19, 0x8f, 0xde, 0x43, 0xda, 0x76, 0x1b, 0xf9, 0xb8, 0xe6, 0xdf, 0x94, 0xd8, 0x24, 0x65, 0xbe, - 0xef, 0xa9, 0x32, 0xd4, 0xb4, 0xd8, 0x58, 0xde, 0xf5, 0x77, 0xa4, 0xa0, 0x1a, 0x73, 0x2c, 0xf5, - 0xc9, 0x54, 0x8c, 0xe5, 0x0d, 0x7f, 0x07, 0x19, 0x72, 0xb6, 0x46, 0x1a, 0xda, 0xc9, 0x47, 0x8a, - 0xa9, 0x9b, 0xf9, 0x3a, 0x37, 0x89, 0x35, 0x12, 0x97, 0xa2, 0x41, 0x91, 0xf5, 0x6d, 0x4b, 0x1a, - 0x2b, 0xa5, 0xa0, 0x1a, 0xb3, 0x6f, 0x93, 0xa6, 0x4f, 0xd1, 0xb7, 0xaa, 0x0c, 0x35, 0x2d, 0x46, - 0xd7, 0x95, 0x96, 0xbf, 0x7c, 0x44, 0x55, 0xd2, 0x8e, 0x28, 0xe8, 0xaa, 0x32, 0xd4, 0xb4, 0x58, - 0x7f, 0x87, 0xfb, 0x87, 0xf7, 0x9c, 0xf6, 0xbe, 0xeb, 0xb5, 0x64, 0x9c, 0xe3, 0xb8, 0xd9, 0x6d, - 0xf7, 0x0f, 0xef, 0x08, 0x7c, 0x66, 0x7f, 0xc7, 0xa5, 0x68, 0x50, 0x24, 0xff, 0xc8, 0x82, 0xa9, - 0x6e, 0xbb, 0xd7, 0x72, 0xbd, 0xa5, 0x39, 0xae, 0x27, 0x7e, 0x26, 0x4f, 0x09, 0xbd, 0x52, 0xe3, - 0xa8, 0x85, 0xa2, 0xf8, 0xd3, 0xda, 0x67, 0x8f, 0x17, 0xfe, 0xd2, 0x9f, 0x95, 0x97, 0xa8, 0xd7, - 0xf0, 0x9b, 0xae, 0xd7, 0x5a, 0xbd, 0x1b, 0xfa, 0xde, 0x0a, 0x3a, 0xf7, 0x94, 0x8e, 0x2e, 0x79, - 0x5a, 0xfe, 0x24, 0xcc, 0x1a, 0x28, 0x1e, 0xa6, 0xe8, 0xcd, 0x99, 0x8a, 0xde, 0x6f, 0x4d, 0xc1, - 0x9c, 0x99, 
0x51, 0x6f, 0x08, 0xed, 0x4b, 0x9f, 0x38, 0x26, 0x46, 0x39, 0x71, 0xb0, 0x23, 0xa6, - 0x71, 0x7b, 0xa4, 0xcc, 0x5b, 0x1b, 0xb9, 0x29, 0xdc, 0xf1, 0x11, 0xd3, 0x28, 0x0c, 0x31, 0x41, - 0x74, 0x04, 0x87, 0x12, 0xa6, 0xb6, 0x0a, 0xc5, 0xae, 0x98, 0x54, 0x5b, 0x13, 0xaa, 0xda, 0x0b, - 0x00, 0x71, 0x66, 0x39, 0x79, 0xab, 0xa8, 0xf5, 0x61, 0x23, 0xe3, 0x9d, 0x51, 0x8b, 0x3c, 0x07, - 0x53, 0x4c, 0xf5, 0xa1, 0x4d, 0x19, 0x86, 0xad, 0xcf, 0xf1, 0xd7, 0x78, 0x29, 0x4a, 0x28, 0x79, - 0x85, 0x69, 0xa9, 0xb1, 0xc2, 0x22, 0xa3, 0xab, 0xcf, 0xc7, 0x5a, 0x6a, 0x0c, 0xc3, 0x44, 0x4d, - 0xc6, 0x3a, 0x65, 0xfa, 0x05, 0x97, 0x0d, 0x06, 0xeb, 0x5c, 0xe9, 0x40, 0x01, 0xe3, 0x76, 0xa5, - 0x94, 0x3e, 0xc2, 0xd7, 0x74, 0xd1, 0xb0, 0x2b, 0xa5, 0xe0, 0xd8, 0xd7, 0x82, 0x7d, 0x8c, 0xbc, - 0x10, 0x9d, 0x15, 0xce, 0xb6, 0x03, 0xae, 0x32, 0xbf, 0x6a, 0x9e, 0xb5, 0x72, 0x5c, 0x43, 0x62, - 0xd6, 0x0e, 0x7f, 0xd8, 0x1a, 0xef, 0x58, 0xf4, 0x05, 0x98, 0x4f, 0xee, 0x42, 0xb9, 0xdf, 0x7c, - 0x7c, 0x6d, 0x12, 0xce, 0xdd, 0x6c, 0xb9, 0x5e, 0x3a, 0x5b, 0x54, 0x56, 0xb2, 0x70, 0x6b, 0xe4, - 0x64, 0xe1, 0x3a, 0x9e, 0x4b, 0xa6, 0xe2, 0xce, 0x8e, 0xe7, 0x52, 0x79, 0xd1, 0x93, 0x75, 0xc9, - 0x9f, 0x5a, 0xf0, 0xb4, 0xd3, 0x14, 0xe7, 0x02, 0xa7, 0x2d, 0x4b, 0x8d, 0x1c, 0xb7, 0x72, 0x45, - 0x87, 0x63, 0xee, 0xf2, 0xfd, 0x1f, 0xbf, 0x52, 0x39, 0x81, 0xaa, 0x18, 0xf1, 0x9f, 0x92, 0x5f, - 0xf0, 0xf4, 0x49, 0x55, 0xf1, 0x44, 0xf6, 0xc9, 0xdf, 0x80, 0x85, 0xc4, 0x07, 0x4b, 0x4b, 0x78, - 0x49, 0x5c, 0x58, 0xd4, 0x93, 0x20, 0x4c, 0xd7, 0x5d, 0x7e, 0x13, 0x3e, 0xfc, 0x50, 0x3e, 0x47, - 0x9a, 0x6c, 0x5f, 0xb1, 0xa0, 0x24, 0xec, 0xda, 0x48, 0x77, 0x53, 0x4e, 0x98, 0xa9, 0x93, 0x77, - 0xa5, 0xb6, 0x91, 0xe1, 0x84, 0xc9, 0x64, 0xf9, 0xbe, 0xeb, 0x35, 0xe5, 0x28, 0x6b, 0x59, 0xfe, - 0x86, 0xeb, 0x35, 0x91, 0x43, 0xb4, 0xb4, 0x2f, 0x0c, 0xb4, 0x37, 0xfd, 0xba, 0x05, 0xf3, 0x3c, - 0x06, 0x36, 0x3e, 0x13, 0xbe, 0xac, 0x9d, 0x8d, 0x04, 0x1b, 0x97, 0x92, 0xce, 0x46, 0x0f, 0x8e, - 0xca, 0xb3, 0x22, 0x6a, 0x36, 0xe9, 0x7b, 0xf4, 
0x39, 0x69, 0x48, 0xe2, 0x2e, 0x51, 0x13, 0x23, - 0xdb, 0x39, 0xb4, 0xa1, 0xb5, 0xae, 0x90, 0x60, 0x8c, 0xcf, 0x7e, 0x17, 0xe6, 0xcc, 0x60, 0x16, - 0xf2, 0x32, 0xcc, 0x76, 0x5d, 0xaf, 0x95, 0x0c, 0x7a, 0xd4, 0xc6, 0xf6, 0x5a, 0x0c, 0x42, 0xb3, - 0x1e, 0x6f, 0xe6, 0xc7, 0xcd, 0x52, 0x36, 0xfa, 0x9a, 0x6f, 0x36, 0x8b, 0xff, 0xf0, 0x1c, 0xdb, - 0x19, 0x41, 0x53, 0xb9, 0xe7, 0xd8, 0xce, 0xa0, 0xf1, 0x93, 0xcb, 0xb1, 0x9d, 0xc5, 0xcc, 0xff, - 0x5d, 0x39, 0xb6, 0x7f, 0x06, 0x46, 0x4d, 0xae, 0xc8, 0xf6, 0xbb, 0x7b, 0x66, 0x60, 0xba, 0xee, - 0x71, 0x19, 0x99, 0x2e, 0xa1, 0xf6, 0x7b, 0x6c, 0xd9, 0xe8, 0x43, 0x6d, 0xa5, 0x17, 0xed, 0x11, - 0x0f, 0x8a, 0xa1, 0xdb, 0x3a, 0x78, 0x29, 0x27, 0x9b, 0x36, 0x43, 0x25, 0x1f, 0x66, 0x88, 0x03, - 0x62, 0x59, 0x21, 0x0a, 0x32, 0xf6, 0x77, 0x0b, 0xb0, 0x98, 0x3e, 0x79, 0xe7, 0xed, 0xd4, 0x40, - 0xbe, 0x6e, 0xc1, 0xbc, 0xd3, 0x8b, 0xf6, 0xa8, 0x17, 0xa9, 0x8b, 0xad, 0x5c, 0xde, 0x0c, 0x49, - 0xf6, 0x9d, 0x91, 0xc2, 0x2a, 0x41, 0x0b, 0x53, 0xb4, 0xc9, 0x5f, 0x81, 0xe9, 0xc8, 0xed, 0x50, - 0xbf, 0x27, 0xec, 0x71, 0x05, 0x71, 0x2e, 0xde, 0x16, 0x45, 0xa8, 0x60, 0xe4, 0x79, 0x76, 0x8e, - 0x61, 0x8a, 0x4c, 0x40, 0xa5, 0xf7, 0xeb, 0x62, 0x6c, 0x40, 0x14, 0xe5, 0xa8, 0x6b, 0x90, 0xfb, - 0x30, 0x2d, 0xdc, 0x1f, 0x94, 0x9f, 0xcb, 0x56, 0x4e, 0x16, 0x02, 0xe1, 0x61, 0x11, 0x0f, 0x81, - 0xf8, 0x1f, 0xa2, 0x22, 0x67, 0x7f, 0x1c, 0x46, 0x4c, 0x78, 0x69, 0x5f, 0x05, 0x82, 0x7e, 0xbb, - 0xbd, 0xe3, 0x34, 0xf6, 0xef, 0xb8, 0x5e, 0xd3, 0xbf, 0xc7, 0xa5, 0xe1, 0x2a, 0x94, 0x02, 0x19, - 0xf5, 0x18, 0xca, 0x89, 0xab, 0xc5, 0xa9, 0x0a, 0x87, 0x0c, 0x31, 0xae, 0x63, 0xff, 0xe1, 0x04, - 0x4c, 0xcb, 0x10, 0xdd, 0xc7, 0xe0, 0x7d, 0xbf, 0x9f, 0xb8, 0x96, 0xdd, 0xc8, 0x25, 0xb2, 0x78, - 0xa0, 0xeb, 0x7d, 0x98, 0x72, 0xbd, 0x7f, 0x23, 0x1f, 0x72, 0x27, 0xfb, 0xdd, 0xff, 0x41, 0x11, - 0x16, 0x52, 0x21, 0xcf, 0x4c, 0x25, 0xee, 0x73, 0x37, 0xbd, 0x9d, 0x6b, 0x54, 0xb5, 0x0e, 0x27, - 0x39, 0xd9, 0xf3, 0x34, 0x4c, 0xa4, 0x27, 0xbe, 0x95, 0xdb, 0xcb, 0x0b, 0x7f, 0x99, 
0xa9, 0x78, - 0x44, 0x4f, 0x4a, 0xf2, 0x6d, 0x0b, 0xce, 0x39, 0xfd, 0x4f, 0x57, 0x48, 0xd3, 0xdd, 0xad, 0xdc, - 0xdf, 0xc4, 0xa8, 0x3e, 0x25, 0x99, 0xcc, 0x7a, 0x21, 0x04, 0xb3, 0x58, 0xb1, 0xff, 0xb3, 0x05, - 0x4f, 0x0e, 0x0c, 0xde, 0xe7, 0xb9, 0x9f, 0x82, 0x24, 0x54, 0xca, 0x8c, 0x9c, 0x53, 0x94, 0xe8, - 0x6b, 0xdc, 0x74, 0xba, 0x9e, 0x34, 0x79, 0xf2, 0x12, 0xcc, 0x71, 0x35, 0x91, 0x49, 0xcf, 0x88, - 0x76, 0xe5, 0x2d, 0x14, 0xbf, 0x8f, 0xa8, 0x1b, 0xe5, 0x98, 0xa8, 0x65, 0x7f, 0xdb, 0x82, 0xa5, - 0x41, 0x99, 0x80, 0x86, 0xb0, 0x91, 0xfc, 0xf5, 0x54, 0x04, 0x43, 0xb9, 0x2f, 0x82, 0x21, 0x65, - 0x25, 0x51, 0xc1, 0x0a, 0x86, 0x81, 0xa2, 0xf0, 0x10, 0x07, 0xfd, 0x6f, 0x58, 0x70, 0x71, 0xc0, - 0x82, 0xef, 0x8b, 0x64, 0xb1, 0x1e, 0x39, 0x92, 0x65, 0x62, 0xd8, 0x48, 0x16, 0xfb, 0x8f, 0x0a, - 0xb0, 0x28, 0xf9, 0x89, 0xcf, 0x0a, 0xaf, 0x24, 0xe2, 0x40, 0x7e, 0x2a, 0x15, 0x07, 0x72, 0x3e, - 0x5d, 0xff, 0x2f, 0x83, 0x40, 0x3e, 0x58, 0x41, 0x20, 0x7f, 0x31, 0x01, 0x17, 0x32, 0x13, 0x14, - 0x91, 0xaf, 0x65, 0xec, 0x5e, 0x77, 0x72, 0xce, 0x84, 0x34, 0xe4, 0xfe, 0x35, 0x6e, 0xe4, 0xc4, - 0xaf, 0x9a, 0x11, 0x0b, 0x62, 0x37, 0xda, 0x3d, 0x85, 0x9c, 0x4e, 0x23, 0x06, 0x2f, 0xd8, 0xbf, - 0x54, 0x80, 0x2b, 0xc3, 0x22, 0xfa, 0x80, 0x06, 0xb7, 0x85, 0x89, 0xe0, 0xb6, 0xc7, 0xa4, 0x59, - 0x9c, 0x4a, 0x9c, 0xdb, 0x3f, 0x99, 0xd4, 0xdb, 0x5e, 0xff, 0xfc, 0x1c, 0xca, 0x65, 0x61, 0x9a, - 0x69, 0x9f, 0x2a, 0xc9, 0x71, 0x2c, 0x0a, 0xa7, 0xeb, 0xa2, 0xf8, 0xc1, 0x51, 0xf9, 0x6c, 0x9c, - 0x26, 0x43, 0x16, 0xa2, 0x6a, 0x44, 0xae, 0xc0, 0x4c, 0x20, 0xa0, 0x2a, 0x9c, 0x47, 0xfa, 0x7d, - 0x88, 0x32, 0xd4, 0x50, 0xf2, 0x25, 0x43, 0x5d, 0x9f, 0x3c, 0xad, 0x6c, 0x30, 0x27, 0xb9, 0xb3, - 0xbc, 0x0d, 0x33, 0xa1, 0x4a, 0x40, 0x2c, 0x14, 0x97, 0x17, 0x87, 0x8c, 0x12, 0x73, 0x76, 0x68, - 0x5b, 0x65, 0x23, 0x16, 0xdf, 0xa7, 0x73, 0x15, 0x6b, 0x94, 0xc4, 0xd6, 0x27, 0x70, 0x61, 0x6e, - 0x87, 0xfe, 0xd3, 0x37, 0x89, 0x60, 0x5a, 0x3e, 0xd0, 0x28, 0xef, 0x01, 0xb7, 0x72, 0x8a, 0x08, - 0x91, 0xfe, 0xc2, 0xfc, 
0x54, 0xa9, 0x2c, 0x41, 0x8a, 0x94, 0xfd, 0x03, 0x0b, 0x66, 0xe5, 0x1c, - 0x79, 0x0c, 0xe1, 0x72, 0x77, 0x93, 0xe1, 0x72, 0x57, 0x73, 0x91, 0x58, 0x03, 0x62, 0xe5, 0xee, - 0xc2, 0x9c, 0x99, 0x19, 0x8f, 0x7c, 0xd6, 0x90, 0xb8, 0xd6, 0x38, 0xb9, 0xa6, 0x94, 0x4c, 0x8e, - 0xa5, 0xb1, 0xfd, 0xdb, 0x25, 0xdd, 0x8b, 0xfc, 0xec, 0x6a, 0xce, 0x7c, 0xeb, 0xc4, 0x99, 0x6f, - 0x4e, 0xbc, 0x89, 0xfc, 0x27, 0xde, 0x2d, 0x98, 0x51, 0x62, 0x51, 0x2a, 0x0f, 0xcf, 0x9a, 0x0e, - 0xc4, 0x4c, 0x03, 0x61, 0xc8, 0x8c, 0xe5, 0xc2, 0xcf, 0xa0, 0x7a, 0x0c, 0xb5, 0xb8, 0xd6, 0x68, - 0xc8, 0x3b, 0x30, 0x7b, 0xcf, 0x0f, 0xf6, 0xdb, 0xbe, 0xc3, 0xd3, 0x9f, 0x43, 0x1e, 0x77, 0xd6, - 0xda, 0x64, 0x2c, 0xa2, 0x38, 0xee, 0xc4, 0xf8, 0xd1, 0x24, 0x46, 0x2a, 0xb0, 0xd0, 0x71, 0x3d, - 0xa4, 0x4e, 0x53, 0x47, 0xc5, 0x4d, 0x8a, 0x8c, 0xcb, 0x4a, 0xb5, 0xde, 0x4a, 0x82, 0x31, 0x5d, - 0x9f, 0x1b, 0x7f, 0x82, 0x84, 0xb5, 0x41, 0xa6, 0x55, 0xad, 0x8d, 0x3f, 0x19, 0x93, 0x16, 0x0c, - 0x11, 0xc6, 0x90, 0x2c, 0xc7, 0x14, 0x6d, 0xf2, 0x45, 0x98, 0x09, 0x65, 0xda, 0xbb, 0x7c, 0x9c, - 0x1d, 0xf4, 0xd9, 0x5e, 0x20, 0x8d, 0x87, 0x52, 0x95, 0xa0, 0x26, 0x48, 0x36, 0xe1, 0xbc, 0x32, - 0x9f, 0x24, 0x9e, 0xa8, 0x9a, 0x8a, 0xb3, 0x24, 0x61, 0x06, 0x1c, 0x33, 0x5b, 0x31, 0x55, 0x8e, - 0x67, 0x9c, 0x14, 0x77, 0x84, 0xc6, 0xb5, 0x1a, 0x5f, 0x7f, 0x4d, 0x94, 0xd0, 0x93, 0x82, 0x3e, - 0x67, 0xc6, 0x08, 0xfa, 0xac, 0xc3, 0x85, 0x34, 0x88, 0xa7, 0xbf, 0xe2, 0x19, 0xb7, 0x8c, 0x2d, - 0xb4, 0x96, 0x55, 0x09, 0xb3, 0xdb, 0x92, 0x3b, 0x50, 0x0a, 0x28, 0x3f, 0x64, 0x55, 0x94, 0x7b, - 0xd5, 0xc8, 0x8e, 0xa4, 0xa8, 0x10, 0x60, 0x8c, 0x8b, 0x8d, 0xbb, 0x93, 0xcc, 0x81, 0x7c, 0x2b, - 0xc7, 0xb7, 0x51, 0xe5, 0xd8, 0x0f, 0x48, 0x4b, 0x67, 0xff, 0xdb, 0x05, 0x38, 0x93, 0xb0, 0x01, - 0x91, 0x67, 0xa1, 0xc8, 0xf3, 0x81, 0x71, 0x69, 0x35, 0x13, 0x4b, 0x54, 0xd1, 0x39, 0x02, 0x46, - 0x7e, 0xd9, 0x82, 0x85, 0x6e, 0xe2, 0x5a, 0x45, 0x09, 0xf2, 0x71, 0x0d, 0xa7, 0x09, 0xa4, 0xc6, - 0xeb, 0x01, 0x49, 0x62, 0x98, 0xa6, 0xce, 0xe4, 0x81, 0xf4, 
0xc6, 0x6e, 0xd3, 0x80, 0xd7, 0x96, - 0x8a, 0x9e, 0x46, 0xb1, 0x96, 0x04, 0x63, 0xba, 0x3e, 0x1b, 0x61, 0xfe, 0x75, 0xe3, 0xbc, 0xbe, - 0x57, 0x51, 0x08, 0x30, 0xc6, 0x45, 0x5e, 0x83, 0x79, 0x99, 0xfa, 0xb6, 0xe6, 0x37, 0xaf, 0x3b, - 0xe1, 0x9e, 0x3c, 0xe1, 0xe8, 0x13, 0xd9, 0x5a, 0x02, 0x8a, 0xa9, 0xda, 0xfc, 0xdb, 0xe2, 0xfc, - 0xc2, 0x1c, 0xc1, 0x54, 0xf2, 0x71, 0x85, 0xb5, 0x24, 0x18, 0xd3, 0xf5, 0xc9, 0xf3, 0xc6, 0x36, - 0x24, 0xee, 0xed, 0xb5, 0x34, 0xc8, 0xd8, 0x8a, 0x2a, 0xb0, 0xd0, 0xe3, 0x07, 0xc2, 0xa6, 0x02, - 0xca, 0xf5, 0xa8, 0x09, 0xde, 0x4e, 0x82, 0x31, 0x5d, 0x9f, 0xbc, 0x0a, 0x67, 0x02, 0x26, 0x6c, - 0x35, 0x02, 0x71, 0x99, 0xaf, 0xef, 0x6a, 0xd1, 0x04, 0x62, 0xb2, 0x2e, 0x79, 0x1d, 0xce, 0xc6, - 0x99, 0x22, 0x15, 0x02, 0x71, 0xbb, 0xaf, 0xd3, 0x96, 0x55, 0xd2, 0x15, 0xb0, 0xbf, 0x0d, 0xf9, - 0x5b, 0xb0, 0x68, 0xf4, 0xc4, 0x86, 0xd7, 0xa4, 0xf7, 0x65, 0x36, 0x3f, 0xfe, 0x6a, 0xce, 0x5a, - 0x0a, 0x86, 0x7d, 0xb5, 0xc9, 0xa7, 0x60, 0xbe, 0xe1, 0xb7, 0xdb, 0x5c, 0xc6, 0x89, 0xc4, 0xfe, - 0x22, 0x6d, 0x9f, 0x48, 0x70, 0x98, 0x80, 0x60, 0xaa, 0x26, 0xb9, 0x01, 0xc4, 0xdf, 0x61, 0xea, - 0x15, 0x6d, 0xbe, 0x2e, 0x9e, 0x77, 0x67, 0x1a, 0xc7, 0x99, 0x64, 0x2c, 0xc8, 0x9b, 0x7d, 0x35, - 0x30, 0xa3, 0x15, 0xcf, 0x7a, 0x66, 0xc4, 0xce, 0xce, 0xe7, 0xf1, 0x12, 0x5d, 0xda, 0x7c, 0xf1, - 0xd0, 0xc0, 0xd9, 0x00, 0xa6, 0x44, 0x68, 0x4e, 0x3e, 0xf9, 0xfb, 0xcc, 0x1c, 0xdf, 0xf1, 0x1e, - 0x21, 0x4a, 0x51, 0x52, 0x22, 0xbf, 0x00, 0xa5, 0x1d, 0xf5, 0xe0, 0x03, 0x4f, 0xda, 0x37, 0xf6, - 0xbe, 0x98, 0x7a, 0xbb, 0x24, 0x3e, 0x9e, 0x6b, 0x00, 0xc6, 0x24, 0xc9, 0x73, 0x30, 0x7b, 0xbd, - 0x56, 0xd1, 0xb3, 0xf0, 0x2c, 0x1f, 0xfd, 0x49, 0xd6, 0x04, 0x4d, 0x00, 0x5b, 0x61, 0x5a, 0x7d, - 0x23, 0xc9, 0x37, 0x54, 0x32, 0xb4, 0x31, 0x56, 0x9b, 0xdf, 0xc0, 0x63, 0x7d, 0xe9, 0x5c, 0xaa, - 0xb6, 0x2c, 0x47, 0x5d, 0x83, 0xbc, 0x0d, 0xb3, 0x72, 0xbf, 0xe0, 0xb2, 0xe9, 0xfc, 0xa3, 0xc5, - 0x65, 0x63, 0x8c, 0x02, 0x4d, 0x7c, 0xfc, 0xda, 0x98, 0xe7, 0xc1, 0xa7, 0xd7, 0x7a, 0xed, 0xf6, - 
0xd2, 0x05, 0x2e, 0x37, 0xe3, 0x6b, 0xe3, 0x18, 0x84, 0x66, 0x3d, 0xf2, 0xa2, 0xf2, 0xa4, 0xfa, - 0x50, 0xe2, 0x1e, 0x5d, 0x7b, 0x52, 0x69, 0xa5, 0x7b, 0x40, 0xe8, 0xc6, 0xc5, 0x87, 0xb8, 0x30, - 0xed, 0xc0, 0xb2, 0xd2, 0xf8, 0xfa, 0x17, 0xc9, 0xd2, 0x52, 0xc2, 0x54, 0xb2, 0x7c, 0x67, 0x60, - 0x4d, 0x3c, 0x01, 0x0b, 0xd9, 0x81, 0x82, 0xd3, 0xde, 0x59, 0x7a, 0x32, 0x0f, 0xd5, 0xb5, 0xb2, - 0x59, 0x95, 0x33, 0x8a, 0xbb, 0x5b, 0x56, 0x36, 0xab, 0xc8, 0x90, 0x13, 0x17, 0x26, 0x9d, 0xf6, - 0x4e, 0xb8, 0xb4, 0xcc, 0xd7, 0x6c, 0x6e, 0x44, 0x62, 0xe3, 0xc1, 0x66, 0x35, 0x44, 0x4e, 0xc2, - 0xfe, 0xf2, 0x84, 0xbe, 0xa8, 0xd1, 0x29, 0x94, 0xdf, 0x35, 0x17, 0x90, 0x95, 0xc7, 0x0b, 0xe5, - 0x7d, 0x0f, 0xb0, 0x88, 0xbd, 0x2f, 0x73, 0xf9, 0x74, 0xb5, 0xc8, 0xc8, 0x25, 0x39, 0x55, 0x32, - 0x3d, 0xb4, 0x38, 0x3d, 0x27, 0x05, 0x86, 0xfd, 0x5d, 0xd0, 0x46, 0xbf, 0x94, 0x17, 0x52, 0x00, - 0x45, 0x37, 0x8c, 0x5c, 0x3f, 0xc7, 0x70, 0xe5, 0x54, 0x5e, 0x65, 0x1e, 0x0d, 0xc1, 0x01, 0x28, - 0x48, 0x31, 0x9a, 0x5e, 0xcb, 0xf5, 0xee, 0xcb, 0xcf, 0xbf, 0x95, 0xbb, 0x7b, 0x91, 0xa0, 0xc9, - 0x01, 0x28, 0x48, 0x91, 0xbb, 0x62, 0x52, 0xe7, 0xf3, 0x1a, 0xfd, 0x66, 0x35, 0x45, 0x2f, 0x39, - 0xb9, 0xef, 0x42, 0x21, 0xec, 0xb8, 0x52, 0x5d, 0x1a, 0x93, 0x56, 0x7d, 0x6b, 0x23, 0x8b, 0x56, - 0x7d, 0x6b, 0x03, 0x19, 0x11, 0xf2, 0x55, 0x0b, 0xc0, 0xe9, 0xec, 0x38, 0x61, 0xe8, 0x34, 0xb5, - 0x75, 0x66, 0xcc, 0xf7, 0x12, 0x2a, 0x1a, 0x5f, 0x8a, 0x34, 0x77, 0xa8, 0x8d, 0xa1, 0x68, 0x50, - 0x26, 0xef, 0xc0, 0xb4, 0x23, 0x5e, 0x66, 0x93, 0xbe, 0xe1, 0xf9, 0x3c, 0x37, 0x98, 0xe2, 0x80, - 0x9b, 0x69, 0x24, 0x08, 0x15, 0x41, 0x46, 0x3b, 0x0a, 0x1c, 0xba, 0xeb, 0xee, 0x4b, 0xe3, 0x50, - 0x7d, 0xec, 0x27, 0x13, 0x18, 0xb2, 0x2c, 0xda, 0x12, 0x84, 0x8a, 0xa0, 0x78, 0x29, 0xdb, 0xf1, - 0x1c, 0x1d, 0xf1, 0x97, 0x4f, 0x5c, 0xa8, 0x19, 0x43, 0x68, 0xbc, 0x94, 0x6d, 0x12, 0xc2, 0x24, - 0x5d, 0x72, 0x00, 0x53, 0x0e, 0x7f, 0x33, 0x52, 0x1e, 0xc5, 0x30, 0x8f, 0xf7, 0x27, 0x53, 0x7d, - 0xc0, 0x85, 0x8b, 0x7c, 0x99, 0x52, 
0x52, 0x23, 0xbf, 0x61, 0xc1, 0xb4, 0x70, 0x5b, 0x66, 0x0a, - 0x29, 0xfb, 0xf6, 0x2f, 0x9c, 0x42, 0x7e, 0x76, 0xe9, 0x52, 0x2d, 0x9d, 0x90, 0x3e, 0xaa, 0x7d, - 0x32, 0x45, 0xe9, 0x89, 0x4e, 0xd5, 0x8a, 0xbb, 0xe5, 0x4f, 0xc1, 0x9c, 0x89, 0x65, 0x24, 0xb7, - 0xea, 0x1f, 0x17, 0x00, 0x78, 0x47, 0x8b, 0x1c, 0x1f, 0x1d, 0x9e, 0x4c, 0x76, 0xcf, 0x6f, 0xe6, - 0xf3, 0xd8, 0xa7, 0x99, 0xaa, 0x03, 0x64, 0xe6, 0xd8, 0x3d, 0xbf, 0x89, 0x92, 0x08, 0x69, 0xc1, - 0x64, 0xd7, 0x89, 0xf6, 0xf2, 0xcf, 0x0b, 0x32, 0x23, 0x82, 0x5d, 0xa3, 0x3d, 0xe4, 0x04, 0xc8, - 0x7b, 0x56, 0xec, 0x1a, 0x53, 0xc8, 0xc7, 0x81, 0x41, 0xf5, 0xd9, 0x8a, 0x74, 0x86, 0x11, 0x23, - 0x38, 0xd0, 0x45, 0x66, 0xf9, 0x7d, 0x0b, 0xe6, 0xcc, 0xaa, 0x19, 0xc3, 0xf4, 0x73, 0xe6, 0x30, - 0xe5, 0xd9, 0x1f, 0xe6, 0x88, 0xff, 0x37, 0x0b, 0x8c, 0xd7, 0xdb, 0x63, 0xef, 0x71, 0x6b, 0x68, - 0xef, 0xf1, 0x89, 0x11, 0xbd, 0xc7, 0x0b, 0x23, 0x79, 0x8f, 0x4f, 0x8e, 0xee, 0x3d, 0x5e, 0x1c, - 0xec, 0x3d, 0x6e, 0x7f, 0xd3, 0x82, 0xb3, 0x7d, 0xbb, 0x0d, 0xd3, 0x83, 0x03, 0xdf, 0x8f, 0x06, - 0x78, 0x5d, 0x62, 0x0c, 0x42, 0xb3, 0x1e, 0x59, 0x87, 0x45, 0xf9, 0x74, 0x42, 0xbd, 0xdb, 0x76, - 0x33, 0x73, 0xb6, 0x6c, 0xa7, 0xe0, 0xd8, 0xd7, 0xc2, 0xfe, 0xae, 0x05, 0xb3, 0x46, 0xa4, 0x37, - 0xfb, 0x0e, 0x1e, 0x11, 0x2f, 0xd9, 0x88, 0x9d, 0xe4, 0xf8, 0x45, 0x95, 0x80, 0x89, 0x3b, 0xd3, - 0x96, 0x91, 0x58, 0x3b, 0xbe, 0x33, 0x65, 0xa5, 0x28, 0xa1, 0x22, 0x65, 0x32, 0xed, 0xf2, 0x4e, - 0x2f, 0x98, 0x29, 0x93, 0x69, 0x17, 0x39, 0x84, 0x93, 0x63, 0x07, 0x02, 0x19, 0x58, 0x60, 0x3c, - 0x52, 0xe1, 0x04, 0x11, 0x0a, 0x18, 0xb9, 0x04, 0x05, 0xea, 0x35, 0xa5, 0xf5, 0x42, 0x3f, 0x23, - 0x79, 0xd5, 0x6b, 0x22, 0x2b, 0xb7, 0xdf, 0x84, 0xb9, 0x3a, 0x6d, 0x04, 0x34, 0x7a, 0x83, 0x1e, - 0x0e, 0xfd, 0x2e, 0x25, 0x9b, 0xed, 0xa9, 0x77, 0x29, 0x59, 0x73, 0x56, 0x6e, 0xff, 0x33, 0x0b, - 0x52, 0x2f, 0xa9, 0x18, 0xf7, 0x27, 0xd6, 0xc0, 0xfb, 0x13, 0xd3, 0xe6, 0x3e, 0x71, 0xa2, 0xcd, - 0xfd, 0x06, 0x90, 0x0e, 0x5b, 0x0a, 0x89, 0x77, 0x83, 0xa4, 0xe1, 0x28, 
0xce, 0x2b, 0xd1, 0x57, - 0x03, 0x33, 0x5a, 0xd9, 0xff, 0x54, 0x30, 0x6b, 0xbe, 0xad, 0xf2, 0xf0, 0x0e, 0xe8, 0x41, 0x91, - 0xa3, 0x92, 0xd6, 0xb3, 0x31, 0x2d, 0xcf, 0xfd, 0xf9, 0x99, 0xe2, 0x81, 0x94, 0x4b, 0x9e, 0x53, - 0xb3, 0xff, 0x48, 0xf0, 0x6a, 0x3c, 0xbe, 0x32, 0x04, 0xaf, 0x9d, 0x24, 0xaf, 0xd7, 0xf3, 0x92, - 0x95, 0xd9, 0x3c, 0x92, 0x15, 0x80, 0x2e, 0x0d, 0x1a, 0xd4, 0x8b, 0x54, 0xbc, 0x4b, 0x51, 0x46, - 0x5e, 0xea, 0x52, 0x34, 0x6a, 0xd8, 0xdf, 0x60, 0x0b, 0x28, 0x76, 0x2b, 0x25, 0x57, 0xd2, 0xbe, - 0xa2, 0xe9, 0xc5, 0xa1, 0x5d, 0x45, 0x8d, 0x28, 0x88, 0x89, 0x87, 0x44, 0x41, 0x7c, 0x04, 0xa6, - 0x03, 0xbf, 0x4d, 0x2b, 0x81, 0x97, 0x76, 0x70, 0x41, 0x56, 0x8c, 0x37, 0x51, 0xc1, 0xed, 0x5f, - 0xb3, 0x60, 0x31, 0x1d, 0xa6, 0x95, 0xbb, 0x03, 0xab, 0x19, 0x4b, 0x5e, 0x18, 0x3d, 0x96, 0xdc, - 0x7e, 0x8f, 0x31, 0x19, 0xb9, 0x8d, 0x7d, 0xd7, 0x13, 0xe1, 0xd7, 0xac, 0xe7, 0x3e, 0x02, 0xd3, - 0x54, 0xbe, 0x3c, 0x29, 0x8c, 0xc0, 0x9a, 0x49, 0xf5, 0xe0, 0xa4, 0x82, 0x93, 0x0a, 0x2c, 0xa8, - 0xab, 0x2f, 0x65, 0xb9, 0x17, 0x69, 0x23, 0xb4, 0xa5, 0x70, 0x3d, 0x09, 0xc6, 0x74, 0x7d, 0xfb, - 0x4b, 0x30, 0x6b, 0x6c, 0x4a, 0x5c, 0x7e, 0xdf, 0x77, 0x1a, 0x51, 0x5a, 0xee, 0x5d, 0x65, 0x85, - 0x28, 0x60, 0xfc, 0x82, 0x41, 0x44, 0x91, 0xa4, 0xe4, 0x9e, 0x8c, 0x1d, 0x91, 0x50, 0x86, 0x2c, - 0xa0, 0x2d, 0x7a, 0x5f, 0xe5, 0x3d, 0x57, 0xc8, 0x90, 0x15, 0xa2, 0x80, 0xd9, 0xcf, 0xc3, 0x8c, - 0x4a, 0xee, 0xc3, 0x33, 0x64, 0x28, 0xe3, 0xb7, 0x99, 0x21, 0xc3, 0x0f, 0x22, 0xe4, 0x10, 0xfb, - 0x2d, 0x98, 0x51, 0x39, 0x88, 0x1e, 0x5e, 0x9b, 0x89, 0xa2, 0xd0, 0x73, 0xaf, 0xfb, 0x61, 0xa4, - 0x12, 0x27, 0x89, 0xfb, 0xb9, 0x9b, 0x1b, 0xbc, 0x0c, 0x35, 0xd4, 0x7e, 0x11, 0x16, 0x52, 0xf7, - 0xb4, 0x43, 0xa4, 0xd3, 0xf8, 0xbd, 0x02, 0xcc, 0x99, 0xd7, 0x75, 0x43, 0xac, 0xe2, 0xe1, 0x85, - 0x63, 0xc6, 0x15, 0x5b, 0x61, 0xc4, 0x2b, 0x36, 0xf3, 0x4e, 0x73, 0xf2, 0x74, 0xef, 0x34, 0x8b, - 0xf9, 0xdc, 0x69, 0x1a, 0x77, 0xef, 0x53, 0x8f, 0xef, 0xee, 0xfd, 0x77, 0x8a, 0x30, 0x9f, 0xcc, - 0xcf, 0x38, 
0xc4, 0x48, 0x3e, 0xdf, 0x37, 0x92, 0x23, 0xda, 0xf4, 0x0b, 0xe3, 0xda, 0xf4, 0x27, - 0xc7, 0xb5, 0xe9, 0x17, 0x1f, 0xc1, 0xa6, 0xdf, 0x6f, 0x91, 0x9f, 0x1a, 0xda, 0x22, 0xff, 0x69, - 0xed, 0x95, 0x37, 0x9d, 0x70, 0x63, 0x89, 0xbd, 0xf2, 0x48, 0x72, 0x18, 0xd6, 0xfc, 0x66, 0xa6, - 0x77, 0xe3, 0xcc, 0x43, 0x6c, 0x97, 0x41, 0xa6, 0x13, 0xdd, 0xe8, 0xd7, 0x86, 0x1f, 0x1a, 0xc1, - 0x81, 0xee, 0x65, 0x98, 0x95, 0xf3, 0x89, 0xab, 0xa0, 0x90, 0x54, 0x5f, 0xeb, 0x31, 0x08, 0xcd, - 0x7a, 0xfc, 0xe9, 0xee, 0xe4, 0xcb, 0xe6, 0xfc, 0x8a, 0xc4, 0x7c, 0xba, 0x3b, 0xf5, 0x12, 0x7a, - 0xba, 0xbe, 0xfd, 0x45, 0xb8, 0x90, 0x69, 0x46, 0xe0, 0x26, 0x5c, 0xae, 0x1d, 0xd1, 0xa6, 0xac, - 0x60, 0xb0, 0x91, 0x4a, 0xf9, 0xbf, 0x7c, 0x67, 0x60, 0x4d, 0x3c, 0x01, 0x8b, 0xfd, 0x9d, 0x02, - 0xcc, 0x27, 0xdf, 0x7d, 0x24, 0xf7, 0xb4, 0xd1, 0x31, 0x17, 0x7b, 0xa7, 0x40, 0x6b, 0xe4, 0xfc, - 0x1b, 0x78, 0x59, 0x71, 0x8f, 0xcf, 0xaf, 0x1d, 0x9d, 0x80, 0xf0, 0xf4, 0x08, 0xcb, 0x5b, 0x02, - 0x49, 0x8e, 0x3f, 0xed, 0x18, 0x87, 0x07, 0xca, 0xd3, 0x6c, 0xee, 0xd4, 0xe3, 0x88, 0x3d, 0x4d, - 0x0a, 0x0d, 0xb2, 0x6c, 0x6f, 0x39, 0xa0, 0x81, 0xbb, 0xeb, 0xea, 0x37, 0xab, 0xb9, 0xe4, 0x7e, - 0x4b, 0x96, 0xa1, 0x86, 0xda, 0xef, 0x4d, 0x40, 0xfc, 0x9e, 0x3f, 0x7f, 0x1c, 0x2d, 0x34, 0x4e, - 0x0e, 0x72, 0xd8, 0x6e, 0x8c, 0xfb, 0x02, 0x61, 0x8c, 0x51, 0x7a, 0x4c, 0x1b, 0x25, 0x98, 0xa0, - 0xf8, 0x13, 0x78, 0xc7, 0xdf, 0x81, 0x85, 0x54, 0x3a, 0x8a, 0xdc, 0x73, 0xb6, 0xfe, 0xa8, 0x00, - 0x25, 0x1d, 0xae, 0x43, 0x3e, 0x99, 0x30, 0xe3, 0x94, 0xaa, 0x1f, 0x36, 0x5e, 0xee, 0xd9, 0xf3, - 0x9b, 0x0f, 0x8e, 0xca, 0x0b, 0xba, 0x72, 0xca, 0x24, 0x73, 0x09, 0x0a, 0xbd, 0xa0, 0x9d, 0x3e, - 0xa7, 0xdd, 0xc6, 0x4d, 0x64, 0xe5, 0x66, 0x88, 0x51, 0xe1, 0xb1, 0x86, 0x18, 0xb1, 0x5d, 0x72, - 0xc7, 0x6f, 0x1e, 0xa6, 0x5f, 0xfa, 0xa9, 0xfa, 0xcd, 0x43, 0xe4, 0x10, 0xf2, 0x1a, 0xcc, 0xcb, - 0xb8, 0x29, 0xf3, 0xfd, 0xf3, 0x42, 0x7c, 0xf9, 0xbe, 0x9d, 0x80, 0x62, 0xaa, 0x36, 0xdb, 0x65, - 0xef, 0x86, 0xbe, 0xc7, 0x33, 0xf1, 0x4e, 0x25, 
0x6f, 0xea, 0x6e, 0xd4, 0xdf, 0xbc, 0xc9, 0xcd, - 0x49, 0xba, 0x46, 0x22, 0x34, 0x6b, 0xfa, 0xa1, 0xa1, 0x59, 0xeb, 0x02, 0x37, 0xe3, 0x96, 0xef, - 0x28, 0x73, 0xd5, 0x2b, 0x0a, 0x2f, 0x2b, 0x7b, 0x70, 0x74, 0x82, 0xa1, 0x4f, 0xb7, 0xb4, 0x6f, - 0xc3, 0x42, 0xaa, 0xc3, 0xd4, 0xb9, 0xda, 0xca, 0x3e, 0x57, 0x0f, 0xf7, 0x38, 0xcf, 0xbf, 0xb0, - 0xe0, 0x6c, 0x9f, 0x08, 0x18, 0x36, 0x82, 0x30, 0xbd, 0x19, 0x4d, 0x3c, 0xfa, 0x66, 0x54, 0x18, - 0x6d, 0x33, 0xaa, 0xee, 0x7c, 0xef, 0x87, 0x97, 0x9f, 0xf8, 0xfe, 0x0f, 0x2f, 0x3f, 0xf1, 0xc7, - 0x3f, 0xbc, 0xfc, 0xc4, 0x7b, 0xc7, 0x97, 0xad, 0xef, 0x1d, 0x5f, 0xb6, 0xbe, 0x7f, 0x7c, 0xd9, - 0xfa, 0xe3, 0xe3, 0xcb, 0xd6, 0x7f, 0x3a, 0xbe, 0x6c, 0x7d, 0xf3, 0x47, 0x97, 0x9f, 0xf8, 0xec, - 0xa7, 0xe3, 0x09, 0xba, 0xaa, 0x26, 0x28, 0xff, 0xf1, 0x31, 0x35, 0x1d, 0x57, 0xbb, 0xfb, 0xad, - 0x55, 0x36, 0x41, 0x57, 0x75, 0x89, 0x9a, 0xa0, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xf0, - 0x4e, 0x06, 0x76, 0x9f, 0x00, 0x00, + // 8574 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x8c, 0x24, 0xd7, + 0x71, 0x98, 0x7a, 0x67, 0x67, 0x77, 0xa6, 0x66, 0xbf, 0xee, 0xdd, 0x1d, 0xb9, 0x5c, 0x92, 0x37, + 0x54, 0xd3, 0x61, 0x48, 0x8b, 0xda, 0x95, 0xf8, 0x91, 0x50, 0xa2, 0xc2, 0x64, 0x66, 0xf7, 0x8e, + 0xb7, 0xc7, 0xdd, 0xbb, 0x65, 0xcd, 0x1e, 0xcf, 0x96, 0x44, 0x5b, 0xbd, 0x33, 0x6f, 0x67, 0xfb, + 0x76, 0xa6, 0x7b, 0xd4, 0xdd, 0xb3, 0x77, 0x4b, 0x11, 0x16, 0x25, 0x81, 0xb2, 0xac, 0x48, 0xb0, + 0x12, 0x5b, 0x08, 0x82, 0x04, 0x81, 0x62, 0x18, 0x70, 0x12, 0xfb, 0x47, 0x60, 0x38, 0x48, 0x7e, + 0x18, 0x88, 0x11, 0xc5, 0x86, 0x02, 0xc4, 0x81, 0xfc, 0x23, 0x91, 0x13, 0xc0, 0xeb, 0x68, 0x9d, + 0x3f, 0x31, 0x12, 0x08, 0x09, 0x14, 0x18, 0xe1, 0x8f, 0x20, 0x78, 0x9f, 0xfd, 0xba, 0xa7, 0x67, + 0xbf, 0xa6, 0xf7, 0x48, 0x27, 0xfe, 0x37, 0xf3, 0xaa, 0x5e, 0x55, 0xf5, 0xfb, 0xac, 0x57, 0xaf, + 0xaa, 0x1e, 0xac, 0xb5, 0xdd, 0x68, 0xa7, 0xbf, 0xb5, 0xd8, 0xf4, 0xbb, 0x4b, 0x4e, 0xd0, 0xf6, + 
0x7b, 0x81, 0x7f, 0x97, 0xff, 0xf8, 0x68, 0xe0, 0x77, 0x3a, 0x7e, 0x3f, 0x0a, 0x97, 0x7a, 0xbb, + 0xed, 0x25, 0xa7, 0xe7, 0x86, 0x4b, 0xba, 0x64, 0xef, 0xe3, 0x4e, 0xa7, 0xb7, 0xe3, 0x7c, 0x7c, + 0xa9, 0x4d, 0x3d, 0x1a, 0x38, 0x11, 0x6d, 0x2d, 0xf6, 0x02, 0x3f, 0xf2, 0xc9, 0xa7, 0x62, 0x6a, + 0x8b, 0x8a, 0x1a, 0xff, 0xf1, 0xb3, 0xaa, 0xee, 0x62, 0x6f, 0xb7, 0xbd, 0xc8, 0xa8, 0x2d, 0xea, + 0x12, 0x45, 0x6d, 0xe1, 0xa3, 0x86, 0x2c, 0x6d, 0xbf, 0xed, 0x2f, 0x71, 0xa2, 0x5b, 0xfd, 0x6d, + 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xb3, 0x85, 0x27, 0x77, 0x5f, 0x0a, 0x17, 0x5d, 0x9f, 0xc9, + 0xb6, 0xb4, 0xe5, 0x44, 0xcd, 0x9d, 0xa5, 0xbd, 0x01, 0x89, 0x16, 0x6c, 0x03, 0xa9, 0xe9, 0x07, + 0x34, 0x0b, 0xe7, 0x85, 0x18, 0xa7, 0xeb, 0x34, 0x77, 0x5c, 0x8f, 0x06, 0xfb, 0xf1, 0x57, 0x77, + 0x69, 0xe4, 0x64, 0xd5, 0x5a, 0x1a, 0x56, 0x2b, 0xe8, 0x7b, 0x91, 0xdb, 0xa5, 0x03, 0x15, 0xfe, + 0xca, 0x71, 0x15, 0xc2, 0xe6, 0x0e, 0xed, 0x3a, 0x03, 0xf5, 0x9e, 0x1f, 0x56, 0xaf, 0x1f, 0xb9, + 0x9d, 0x25, 0xd7, 0x8b, 0xc2, 0x28, 0x48, 0x57, 0xb2, 0x7f, 0x54, 0x80, 0x72, 0x6d, 0xad, 0xde, + 0x88, 0x9c, 0xa8, 0x1f, 0x92, 0xaf, 0x5a, 0x30, 0xd5, 0xf1, 0x9d, 0x56, 0xdd, 0xe9, 0x38, 0x5e, + 0x93, 0x06, 0xf3, 0xd6, 0x13, 0xd6, 0xd3, 0x95, 0xe7, 0xd6, 0x16, 0x47, 0xe9, 0xaf, 0xc5, 0xda, + 0xbd, 0x10, 0x69, 0xe8, 0xf7, 0x83, 0x26, 0x45, 0xba, 0x5d, 0xbf, 0xf4, 0xbd, 0x83, 0xea, 0x87, + 0x0e, 0x0f, 0xaa, 0x53, 0x6b, 0x06, 0x27, 0x4c, 0xf0, 0x25, 0xdf, 0xb6, 0xe0, 0x42, 0xd3, 0xf1, + 0x9c, 0x60, 0x7f, 0xd3, 0x09, 0xda, 0x34, 0x7a, 0x35, 0xf0, 0xfb, 0xbd, 0xf9, 0xb1, 0x73, 0x90, + 0xe6, 0x11, 0x29, 0xcd, 0x85, 0xe5, 0x34, 0x3b, 0x1c, 0x94, 0x80, 0xcb, 0x15, 0x46, 0xce, 0x56, + 0x87, 0x9a, 0x72, 0x15, 0xce, 0x53, 0xae, 0x46, 0x9a, 0x1d, 0x0e, 0x4a, 0x40, 0x9e, 0x81, 0x49, + 0xd7, 0x6b, 0x07, 0x34, 0x0c, 0xe7, 0xc7, 0x9f, 0xb0, 0x9e, 0x2e, 0xd7, 0x67, 0x65, 0xf5, 0xc9, + 0x55, 0x51, 0x8c, 0x0a, 0x6e, 0xff, 0x66, 0x01, 0x2e, 0xd4, 0xd6, 0xea, 0x9b, 0x81, 0xb3, 0xbd, + 0xed, 0x36, 0xd1, 0xef, 0x47, 0xae, 
0xd7, 0x36, 0x09, 0x58, 0x47, 0x13, 0x20, 0x2f, 0x42, 0x25, + 0xa4, 0xc1, 0x9e, 0xdb, 0xa4, 0x1b, 0x7e, 0x10, 0xf1, 0x4e, 0x29, 0xd6, 0x2f, 0x4a, 0xf4, 0x4a, + 0x23, 0x06, 0xa1, 0x89, 0xc7, 0xaa, 0x05, 0xbe, 0x1f, 0x49, 0x38, 0x6f, 0xb3, 0x72, 0x5c, 0x0d, + 0x63, 0x10, 0x9a, 0x78, 0x64, 0x05, 0xe6, 0x1c, 0xcf, 0xf3, 0x23, 0x27, 0x72, 0x7d, 0x6f, 0x23, + 0xa0, 0xdb, 0xee, 0x7d, 0xf9, 0x89, 0xf3, 0xb2, 0xee, 0x5c, 0x2d, 0x05, 0xc7, 0x81, 0x1a, 0xe4, + 0x5b, 0x16, 0xcc, 0x85, 0x91, 0xdb, 0xdc, 0x75, 0x3d, 0x1a, 0x86, 0xcb, 0xbe, 0xb7, 0xed, 0xb6, + 0xe7, 0x8b, 0xbc, 0xdb, 0x6e, 0x8e, 0xd6, 0x6d, 0x8d, 0x14, 0xd5, 0xfa, 0x25, 0x26, 0x52, 0xba, + 0x14, 0x07, 0xb8, 0x93, 0x8f, 0x40, 0x59, 0xb6, 0x28, 0x0d, 0xe7, 0x27, 0x9e, 0x28, 0x3c, 0x5d, + 0xae, 0x4f, 0x1f, 0x1e, 0x54, 0xcb, 0xab, 0xaa, 0x10, 0x63, 0xb8, 0xbd, 0x02, 0xf3, 0xb5, 0xee, + 0x96, 0x13, 0x86, 0x4e, 0xcb, 0x0f, 0x52, 0x5d, 0xf7, 0x34, 0x94, 0xba, 0x4e, 0xaf, 0xe7, 0x7a, + 0x6d, 0xd6, 0x77, 0x8c, 0xce, 0xd4, 0xe1, 0x41, 0xb5, 0xb4, 0x2e, 0xcb, 0x50, 0x43, 0xed, 0xff, + 0x38, 0x06, 0x95, 0x9a, 0xe7, 0x74, 0xf6, 0x43, 0x37, 0xc4, 0xbe, 0x47, 0x3e, 0x07, 0x25, 0xb6, + 0x6a, 0xb5, 0x9c, 0xc8, 0x91, 0x33, 0xfd, 0x63, 0x8b, 0x62, 0x11, 0x59, 0x34, 0x17, 0x91, 0xf8, + 0xf3, 0x19, 0xf6, 0xe2, 0xde, 0xc7, 0x17, 0x6f, 0x6d, 0xdd, 0xa5, 0xcd, 0x68, 0x9d, 0x46, 0x4e, + 0x9d, 0xc8, 0x5e, 0x80, 0xb8, 0x0c, 0x35, 0x55, 0xe2, 0xc3, 0x78, 0xd8, 0xa3, 0x4d, 0x39, 0x73, + 0xd7, 0x47, 0x9c, 0x21, 0xb1, 0xe8, 0x8d, 0x1e, 0x6d, 0xd6, 0xa7, 0x24, 0xeb, 0x71, 0xf6, 0x0f, + 0x39, 0x23, 0x72, 0x0f, 0x26, 0x42, 0xbe, 0x96, 0xc9, 0x49, 0x79, 0x2b, 0x3f, 0x96, 0x9c, 0x6c, + 0x7d, 0x46, 0x32, 0x9d, 0x10, 0xff, 0x51, 0xb2, 0xb3, 0xff, 0x93, 0x05, 0x17, 0x0d, 0xec, 0x5a, + 0xd0, 0xee, 0x77, 0xa9, 0x17, 0x91, 0x27, 0x60, 0xdc, 0x73, 0xba, 0x54, 0xce, 0x2a, 0x2d, 0xf2, + 0x4d, 0xa7, 0x4b, 0x91, 0x43, 0xc8, 0x93, 0x50, 0xdc, 0x73, 0x3a, 0x7d, 0xca, 0x1b, 0xa9, 0x5c, + 0x9f, 0x96, 0x28, 0xc5, 0x37, 0x58, 0x21, 0x0a, 0x18, 0x79, 0x1b, 0xca, 
0xfc, 0xc7, 0xb5, 0xc0, + 0xef, 0xe6, 0xf4, 0x69, 0x52, 0xc2, 0x37, 0x14, 0x59, 0x31, 0xfc, 0xf4, 0x5f, 0x8c, 0x19, 0xda, + 0x7f, 0x6c, 0xc1, 0xac, 0xf1, 0x71, 0x6b, 0x6e, 0x18, 0x91, 0xcf, 0x0e, 0x0c, 0x9e, 0xc5, 0x93, + 0x0d, 0x1e, 0x56, 0x9b, 0x0f, 0x9d, 0x39, 0xf9, 0xa5, 0x25, 0x55, 0x62, 0x0c, 0x1c, 0x0f, 0x8a, + 0x6e, 0x44, 0xbb, 0xe1, 0xfc, 0xd8, 0x13, 0x85, 0xa7, 0x2b, 0xcf, 0xad, 0xe6, 0xd6, 0x8d, 0x71, + 0xfb, 0xae, 0x32, 0xfa, 0x28, 0xd8, 0xd8, 0xbf, 0x55, 0x48, 0x74, 0xdf, 0xba, 0x92, 0xe3, 0x5d, + 0x0b, 0x26, 0x3a, 0xce, 0x16, 0xed, 0x88, 0xb9, 0x55, 0x79, 0xee, 0xcd, 0xdc, 0x24, 0x51, 0x3c, + 0x16, 0xd7, 0x38, 0xfd, 0xab, 0x5e, 0x14, 0xec, 0xc7, 0xc3, 0x4b, 0x14, 0xa2, 0x64, 0x4e, 0xfe, + 0xae, 0x05, 0x95, 0x78, 0x55, 0x53, 0xcd, 0xb2, 0x95, 0xbf, 0x30, 0xf1, 0x62, 0x2a, 0x25, 0xd2, + 0x4b, 0xb4, 0x01, 0x41, 0x53, 0x96, 0x85, 0x4f, 0x40, 0xc5, 0xf8, 0x04, 0x32, 0x07, 0x85, 0x5d, + 0xba, 0x2f, 0x06, 0x3c, 0xb2, 0x9f, 0xe4, 0x52, 0x62, 0x84, 0xcb, 0x21, 0xfd, 0xc9, 0xb1, 0x97, + 0xac, 0x85, 0x57, 0x60, 0x2e, 0xcd, 0xf0, 0x34, 0xf5, 0xed, 0x7f, 0x5a, 0x4c, 0x0c, 0x4c, 0xb6, + 0x10, 0x10, 0x1f, 0x26, 0xbb, 0x34, 0x0a, 0xdc, 0xa6, 0xea, 0xb2, 0x95, 0xd1, 0x5a, 0x69, 0x9d, + 0x13, 0x8b, 0x37, 0x44, 0xf1, 0x3f, 0x44, 0xc5, 0x85, 0xec, 0xc0, 0xb8, 0x13, 0xb4, 0x55, 0x9f, + 0x5c, 0xcb, 0x67, 0x5a, 0xc6, 0x4b, 0x45, 0x2d, 0x68, 0x87, 0xc8, 0x39, 0x90, 0x25, 0x28, 0x47, + 0x34, 0xe8, 0xba, 0x9e, 0x13, 0x89, 0x1d, 0xb4, 0x54, 0xbf, 0x20, 0xd1, 0xca, 0x9b, 0x0a, 0x80, + 0x31, 0x0e, 0xe9, 0xc0, 0x44, 0x2b, 0xd8, 0xc7, 0xbe, 0x37, 0x3f, 0x9e, 0x47, 0x53, 0xac, 0x70, + 0x5a, 0xf1, 0x20, 0x15, 0xff, 0x51, 0xf2, 0x20, 0xbf, 0x6a, 0xc1, 0xa5, 0x2e, 0x75, 0xc2, 0x7e, + 0x40, 0xd9, 0x27, 0x20, 0x8d, 0xa8, 0xc7, 0x3a, 0x76, 0xbe, 0xc8, 0x99, 0xe3, 0xa8, 0xfd, 0x30, + 0x48, 0xb9, 0xfe, 0x98, 0x14, 0xe5, 0x52, 0x16, 0x14, 0x33, 0xa5, 0x21, 0x6f, 0x43, 0x25, 0x8a, + 0x3a, 0x8d, 0x88, 0xe9, 0xc1, 0xed, 0xfd, 0xf9, 0x09, 0xbe, 0x78, 0x8d, 0xb8, 0xc2, 0x6c, 0x6e, + 0xae, 0x29, 
0x82, 0xf5, 0x59, 0x36, 0x5b, 0x8c, 0x02, 0x34, 0xd9, 0xd9, 0xff, 0xa2, 0x08, 0x17, + 0x06, 0xb6, 0x15, 0xf2, 0x02, 0x14, 0x7b, 0x3b, 0x4e, 0xa8, 0xf6, 0x89, 0x2b, 0x6a, 0x91, 0xda, + 0x60, 0x85, 0xef, 0x1d, 0x54, 0xa7, 0x55, 0x15, 0x5e, 0x80, 0x02, 0x99, 0x69, 0x6d, 0x5d, 0x1a, + 0x86, 0x4e, 0x5b, 0x6d, 0x1e, 0xc6, 0x20, 0xe5, 0xc5, 0xa8, 0xe0, 0xe4, 0xe7, 0x2d, 0x98, 0x16, + 0x03, 0x16, 0x69, 0xd8, 0xef, 0x44, 0x6c, 0x83, 0x64, 0x9d, 0x72, 0x23, 0x8f, 0xc9, 0x21, 0x48, + 0xd6, 0x2f, 0x4b, 0xee, 0xd3, 0x66, 0x69, 0x88, 0x49, 0xbe, 0xe4, 0x0e, 0x94, 0xc3, 0xc8, 0x09, + 0x22, 0xda, 0xaa, 0x45, 0x5c, 0x95, 0xab, 0x3c, 0xf7, 0x93, 0x27, 0xdb, 0x39, 0x36, 0xdd, 0x2e, + 0x15, 0xbb, 0x54, 0x43, 0x11, 0xc0, 0x98, 0x16, 0x79, 0x1b, 0x20, 0xe8, 0x7b, 0x8d, 0x7e, 0xb7, + 0xeb, 0x04, 0xfb, 0x52, 0xbb, 0xbb, 0x3e, 0xda, 0xe7, 0xa1, 0xa6, 0x17, 0x2b, 0x3a, 0x71, 0x19, + 0x1a, 0xfc, 0xc8, 0x97, 0x2c, 0x98, 0x16, 0xf3, 0x40, 0x49, 0x30, 0x91, 0xb3, 0x04, 0x17, 0x58, + 0xd3, 0xae, 0x98, 0x2c, 0x30, 0xc9, 0x91, 0xbc, 0x09, 0x95, 0xa6, 0xdf, 0xed, 0x75, 0xa8, 0x68, + 0xdc, 0xc9, 0x53, 0x37, 0x2e, 0x1f, 0xba, 0xcb, 0x31, 0x09, 0x34, 0xe9, 0xd9, 0xff, 0x3e, 0xa9, + 0xe3, 0xa8, 0x21, 0x4d, 0x3e, 0x03, 0x8f, 0x84, 0xfd, 0x66, 0x93, 0x86, 0xe1, 0x76, 0xbf, 0x83, + 0x7d, 0xef, 0xba, 0x1b, 0x46, 0x7e, 0xb0, 0xbf, 0xe6, 0x76, 0xdd, 0x88, 0x0f, 0xe8, 0x62, 0xfd, + 0xf1, 0xc3, 0x83, 0xea, 0x23, 0x8d, 0x61, 0x48, 0x38, 0xbc, 0x3e, 0x71, 0xe0, 0xd1, 0xbe, 0x37, + 0x9c, 0xbc, 0x38, 0x7e, 0x54, 0x0f, 0x0f, 0xaa, 0x8f, 0xde, 0x1e, 0x8e, 0x86, 0x47, 0xd1, 0xb0, + 0xff, 0xd4, 0x62, 0xdb, 0x90, 0xf8, 0xae, 0x4d, 0xda, 0xed, 0x75, 0xd8, 0xd2, 0x79, 0xfe, 0xca, + 0x71, 0x94, 0x50, 0x8e, 0x31, 0x9f, 0xbd, 0x5c, 0xc9, 0x3f, 0x4c, 0x43, 0xb6, 0xff, 0xab, 0x05, + 0x97, 0xd2, 0xc8, 0x0f, 0x40, 0xa1, 0x0b, 0x93, 0x0a, 0xdd, 0xcd, 0x7c, 0xbf, 0x76, 0x88, 0x56, + 0xf7, 0x0b, 0xc6, 0x80, 0x55, 0xa8, 0x48, 0xb7, 0xc9, 0x4b, 0x30, 0x15, 0xc9, 0xbf, 0x37, 0x63, + 0xe5, 0x5c, 0x1b, 0x26, 0x36, 0x0d, 0x18, 0x26, 
0x30, 0x59, 0xcd, 0x66, 0xa7, 0x1f, 0x46, 0x34, + 0x68, 0x34, 0xfd, 0x9e, 0x58, 0x76, 0x4b, 0x71, 0xcd, 0x65, 0x03, 0x86, 0x09, 0x4c, 0xfb, 0x6f, + 0x16, 0x07, 0xdb, 0xfd, 0xff, 0x75, 0x7d, 0x25, 0x56, 0x3f, 0x0a, 0xef, 0xa7, 0xfa, 0x31, 0xfe, + 0x81, 0x52, 0x3f, 0xbe, 0x6c, 0x31, 0x2d, 0x4e, 0x0c, 0x80, 0x50, 0xaa, 0x46, 0xaf, 0xe7, 0x3b, + 0x1d, 0x90, 0x6e, 0x9b, 0x8a, 0xa1, 0xe4, 0x85, 0x31, 0x5b, 0xfb, 0x1f, 0x8d, 0xc3, 0x54, 0xcd, + 0x8b, 0xdc, 0xda, 0xf6, 0xb6, 0xeb, 0xb9, 0xd1, 0x3e, 0xf9, 0xc6, 0x18, 0x2c, 0xf5, 0x02, 0xba, + 0x4d, 0x83, 0x80, 0xb6, 0x56, 0xfa, 0x81, 0xeb, 0xb5, 0x1b, 0xcd, 0x1d, 0xda, 0xea, 0x77, 0x5c, + 0xaf, 0xbd, 0xda, 0xf6, 0x7c, 0x5d, 0x7c, 0xf5, 0x3e, 0x6d, 0xf6, 0x79, 0xbb, 0x8a, 0x55, 0xa2, + 0x3b, 0x9a, 0xec, 0x1b, 0xa7, 0x63, 0x5a, 0x7f, 0xfe, 0xf0, 0xa0, 0xba, 0x74, 0xca, 0x4a, 0x78, + 0xda, 0x4f, 0x23, 0x5f, 0x1b, 0x83, 0xc5, 0x80, 0x7e, 0xbe, 0xef, 0x9e, 0xbc, 0x35, 0xc4, 0x32, + 0xde, 0x19, 0x71, 0xbb, 0x3f, 0x15, 0xcf, 0xfa, 0x73, 0x87, 0x07, 0xd5, 0x53, 0xd6, 0xc1, 0x53, + 0x7e, 0x97, 0xbd, 0x01, 0x95, 0x5a, 0xcf, 0x0d, 0xdd, 0xfb, 0xe8, 0xf7, 0x23, 0x7a, 0x02, 0x83, + 0x46, 0x15, 0x8a, 0x41, 0xbf, 0x43, 0xc5, 0x02, 0x53, 0xae, 0x97, 0xd9, 0xb2, 0x8c, 0xac, 0x00, + 0x45, 0xb9, 0xfd, 0x65, 0xb6, 0x05, 0x71, 0x92, 0x29, 0x53, 0xd6, 0x5d, 0x28, 0x06, 0x8c, 0x89, + 0x1c, 0x59, 0xa3, 0x9e, 0xfa, 0x63, 0xa9, 0xa5, 0x10, 0xec, 0x27, 0x0a, 0x16, 0xf6, 0x77, 0xc7, + 0xe0, 0x72, 0xad, 0xd7, 0x5b, 0xa7, 0xe1, 0x4e, 0x4a, 0x8a, 0x5f, 0xb4, 0x60, 0x66, 0xcf, 0x0d, + 0xa2, 0xbe, 0xd3, 0x51, 0xd6, 0x4a, 0x21, 0x4f, 0x63, 0x54, 0x79, 0x38, 0xb7, 0x37, 0x12, 0xa4, + 0xeb, 0xe4, 0xf0, 0xa0, 0x3a, 0x93, 0x2c, 0xc3, 0x14, 0x7b, 0xf2, 0x77, 0x2c, 0x98, 0x93, 0x45, + 0x37, 0xfd, 0x16, 0x35, 0xad, 0xe1, 0xb7, 0xf3, 0x94, 0x49, 0x13, 0x17, 0x56, 0xcc, 0x74, 0x29, + 0x0e, 0x08, 0x61, 0xff, 0xf7, 0x31, 0x78, 0x78, 0x08, 0x0d, 0xf2, 0x6b, 0x16, 0x5c, 0x12, 0x26, + 0x74, 0x03, 0x84, 0x74, 0x5b, 0xb6, 0xe6, 0x4f, 0xe7, 0x2d, 0x39, 0xb2, 0x29, 0x4e, 
0xbd, 0x26, + 0xad, 0xcf, 0xb3, 0x25, 0x79, 0x39, 0x83, 0x35, 0x66, 0x0a, 0xc4, 0x25, 0x15, 0x46, 0xf5, 0x94, + 0xa4, 0x63, 0x0f, 0x44, 0xd2, 0x46, 0x06, 0x6b, 0xcc, 0x14, 0xc8, 0xfe, 0xeb, 0xf0, 0xe8, 0x11, + 0xe4, 0x8e, 0x9f, 0x9c, 0xf6, 0x9b, 0x7a, 0xd4, 0x27, 0xc7, 0xdc, 0x09, 0xe6, 0xb5, 0x0d, 0x13, + 0x7c, 0xea, 0xa8, 0x89, 0x0d, 0x6c, 0x0f, 0xe6, 0x73, 0x2a, 0x44, 0x09, 0xb1, 0xbf, 0x6b, 0x41, + 0xe9, 0x14, 0xb6, 0xcf, 0x6a, 0xd2, 0xf6, 0x59, 0x1e, 0xb0, 0x7b, 0x46, 0x83, 0x76, 0xcf, 0x57, + 0x47, 0xeb, 0x8d, 0x93, 0xd8, 0x3b, 0x7f, 0x64, 0xc1, 0x85, 0x01, 0xfb, 0x28, 0xd9, 0x81, 0x4b, + 0x3d, 0xbf, 0xa5, 0xb6, 0xd3, 0xeb, 0x4e, 0xb8, 0xc3, 0x61, 0xf2, 0xf3, 0x5e, 0x60, 0x3d, 0xb9, + 0x91, 0x01, 0x7f, 0xef, 0xa0, 0x3a, 0xaf, 0x89, 0xa4, 0x10, 0x30, 0x93, 0x22, 0xe9, 0x41, 0x69, + 0xdb, 0xa5, 0x9d, 0x56, 0x3c, 0x04, 0x47, 0xd4, 0xd2, 0xae, 0x49, 0x6a, 0xe2, 0x6a, 0x40, 0xfd, + 0x43, 0xcd, 0xc5, 0xfe, 0xb1, 0x05, 0x33, 0xb5, 0x7e, 0xb4, 0xc3, 0x74, 0x94, 0x26, 0xb7, 0xc6, + 0x11, 0x0f, 0x8a, 0xa1, 0xdb, 0xde, 0x7b, 0x21, 0x9f, 0xc5, 0xb8, 0xc1, 0x48, 0xc9, 0x2b, 0x12, + 0xad, 0xac, 0xf3, 0x42, 0x14, 0x6c, 0x48, 0x00, 0x13, 0xbe, 0xd3, 0x8f, 0x76, 0x9e, 0x93, 0x9f, + 0x3c, 0xa2, 0x65, 0xe2, 0x16, 0xfb, 0x9c, 0xe7, 0x24, 0x47, 0xad, 0x32, 0x8a, 0x52, 0x94, 0x9c, + 0xec, 0x2f, 0xc2, 0x4c, 0xf2, 0xde, 0xed, 0x04, 0x63, 0xf6, 0x71, 0x28, 0x38, 0x81, 0x27, 0x47, + 0x6c, 0x45, 0x22, 0x14, 0x6a, 0x78, 0x13, 0x59, 0x39, 0x79, 0x16, 0x4a, 0xdb, 0xfd, 0x4e, 0x87, + 0x9f, 0x2b, 0xc4, 0x25, 0x97, 0x3e, 0x16, 0x5d, 0x93, 0xe5, 0xa8, 0x31, 0xec, 0xff, 0x3d, 0x0e, + 0xb3, 0xf5, 0x4e, 0x9f, 0xbe, 0x1a, 0x50, 0xaa, 0x6c, 0x41, 0x35, 0x98, 0xed, 0x05, 0x74, 0xcf, + 0xa5, 0xf7, 0x1a, 0xb4, 0x43, 0x9b, 0x91, 0x1f, 0x48, 0x69, 0x1e, 0x96, 0x84, 0x66, 0x37, 0x92, + 0x60, 0x4c, 0xe3, 0x93, 0x57, 0x60, 0xc6, 0x69, 0x46, 0xee, 0x1e, 0xd5, 0x14, 0x84, 0xb8, 0x0f, + 0x49, 0x0a, 0x33, 0xb5, 0x04, 0x14, 0x53, 0xd8, 0xe4, 0xb3, 0x30, 0x1f, 0x36, 0x9d, 0x0e, 0xbd, + 0xdd, 0x93, 0xac, 0x96, 
0x77, 0x68, 0x73, 0x77, 0xc3, 0x77, 0xbd, 0x48, 0xda, 0x1d, 0x9f, 0x90, + 0x94, 0xe6, 0x1b, 0x43, 0xf0, 0x70, 0x28, 0x05, 0xf2, 0x2f, 0x2d, 0x78, 0xbc, 0x17, 0xd0, 0x8d, + 0xc0, 0xef, 0xfa, 0x6c, 0xa8, 0x0d, 0x98, 0xc3, 0xa4, 0x59, 0xe8, 0x8d, 0x11, 0x75, 0x29, 0x51, + 0x32, 0x78, 0x87, 0xf3, 0xe1, 0xc3, 0x83, 0xea, 0xe3, 0x1b, 0x47, 0x09, 0x80, 0x47, 0xcb, 0x47, + 0xfe, 0x95, 0x05, 0x57, 0x7a, 0x7e, 0x18, 0x1d, 0xf1, 0x09, 0xc5, 0x73, 0xfd, 0x04, 0xfb, 0xf0, + 0xa0, 0x7a, 0x65, 0xe3, 0x48, 0x09, 0xf0, 0x18, 0x09, 0xed, 0xc3, 0x0a, 0x5c, 0x30, 0xc6, 0x9e, + 0x34, 0xe6, 0xbc, 0x0c, 0xd3, 0x6a, 0x30, 0xc4, 0xba, 0x4f, 0x39, 0xb6, 0xed, 0xd5, 0x4c, 0x20, + 0x26, 0x71, 0xd9, 0xb8, 0xd3, 0x43, 0x51, 0xd4, 0x4e, 0x8d, 0xbb, 0x8d, 0x04, 0x14, 0x53, 0xd8, + 0x64, 0x15, 0x2e, 0xca, 0x12, 0xa4, 0xbd, 0x8e, 0xdb, 0x74, 0x96, 0xfd, 0xbe, 0x1c, 0x72, 0xc5, + 0xfa, 0xc3, 0x87, 0x07, 0xd5, 0x8b, 0x1b, 0x83, 0x60, 0xcc, 0xaa, 0x43, 0xd6, 0xe0, 0x92, 0xd3, + 0x8f, 0x7c, 0xfd, 0xfd, 0x57, 0x3d, 0xb6, 0x9d, 0xb6, 0xf8, 0xd0, 0x2a, 0x89, 0x7d, 0xb7, 0x96, + 0x01, 0xc7, 0xcc, 0x5a, 0x64, 0x23, 0x45, 0xad, 0x41, 0x9b, 0xbe, 0xd7, 0x12, 0xbd, 0x5c, 0x8c, + 0x8f, 0x81, 0xb5, 0x0c, 0x1c, 0xcc, 0xac, 0x49, 0x3a, 0x30, 0xd3, 0x75, 0xee, 0xdf, 0xf6, 0x9c, + 0x3d, 0xc7, 0xed, 0x30, 0x26, 0xd2, 0x5e, 0x38, 0xdc, 0xca, 0xd4, 0x8f, 0xdc, 0xce, 0xa2, 0xf0, + 0xe3, 0x58, 0x5c, 0xf5, 0xa2, 0x5b, 0x41, 0x23, 0x62, 0x9a, 0xba, 0xd0, 0x20, 0xd7, 0x13, 0xb4, + 0x30, 0x45, 0x9b, 0xdc, 0x82, 0xcb, 0x7c, 0x3a, 0xae, 0xf8, 0xf7, 0xbc, 0x15, 0xda, 0x71, 0xf6, + 0xd5, 0x07, 0x4c, 0xf2, 0x0f, 0x78, 0xe4, 0xf0, 0xa0, 0x7a, 0xb9, 0x91, 0x85, 0x80, 0xd9, 0xf5, + 0x88, 0x03, 0x8f, 0x26, 0x01, 0x48, 0xf7, 0xdc, 0xd0, 0xf5, 0x3d, 0x61, 0x96, 0x2b, 0xc5, 0x66, + 0xb9, 0xc6, 0x70, 0x34, 0x3c, 0x8a, 0x06, 0xf9, 0x7b, 0x16, 0x5c, 0xca, 0x9a, 0x86, 0xf3, 0xe5, + 0x3c, 0x6e, 0x93, 0x53, 0x53, 0x4b, 0x8c, 0x88, 0xcc, 0x45, 0x21, 0x53, 0x08, 0xf2, 0x8e, 0x05, + 0x53, 0x8e, 0x71, 0x82, 0x9e, 0x87, 0x3c, 0x76, 0x2d, 0xf3, 
0x4c, 0x5e, 0x9f, 0x3b, 0x3c, 0xa8, + 0x26, 0x4e, 0xe9, 0x98, 0xe0, 0x48, 0xfe, 0x81, 0x05, 0x97, 0x33, 0xe7, 0xf8, 0x7c, 0xe5, 0x3c, + 0x5a, 0x88, 0x0f, 0x92, 0xec, 0x35, 0x27, 0x5b, 0x0c, 0xf2, 0x2d, 0x4b, 0x6f, 0x65, 0xea, 0x82, + 0x71, 0x7e, 0x8a, 0x8b, 0x36, 0xa2, 0xc1, 0xc3, 0x50, 0xa3, 0x14, 0xe1, 0xfa, 0x45, 0x63, 0x67, + 0x54, 0x85, 0x98, 0x66, 0x4f, 0xbe, 0x69, 0xa9, 0xad, 0x51, 0x4b, 0x34, 0x7d, 0x5e, 0x12, 0x91, + 0x78, 0xa7, 0xd5, 0x02, 0xa5, 0x98, 0x93, 0x9f, 0x81, 0x05, 0x67, 0xcb, 0x0f, 0xa2, 0xcc, 0xc9, + 0x37, 0x3f, 0xc3, 0xa7, 0xd1, 0x95, 0xc3, 0x83, 0xea, 0x42, 0x6d, 0x28, 0x16, 0x1e, 0x41, 0xc1, + 0xfe, 0x8d, 0x22, 0x4c, 0x89, 0x93, 0x90, 0xdc, 0xba, 0x7e, 0xdb, 0x82, 0xc7, 0x9a, 0xfd, 0x20, + 0xa0, 0x5e, 0xd4, 0x88, 0x68, 0x6f, 0x70, 0xe3, 0xb2, 0xce, 0x75, 0xe3, 0x7a, 0xe2, 0xf0, 0xa0, + 0xfa, 0xd8, 0xf2, 0x11, 0xfc, 0xf1, 0x48, 0xe9, 0xc8, 0xbf, 0xb3, 0xc0, 0x96, 0x08, 0x75, 0xa7, + 0xb9, 0xdb, 0x0e, 0xfc, 0xbe, 0xd7, 0x1a, 0xfc, 0x88, 0xb1, 0x73, 0xfd, 0x88, 0xa7, 0x0e, 0x0f, + 0xaa, 0xf6, 0xf2, 0xb1, 0x52, 0xe0, 0x09, 0x24, 0x25, 0xaf, 0xc2, 0x05, 0x89, 0x75, 0xf5, 0x7e, + 0x8f, 0x06, 0x2e, 0x3b, 0x73, 0x48, 0xc5, 0x31, 0xf6, 0x4d, 0x4b, 0x23, 0xe0, 0x60, 0x1d, 0x12, + 0xc2, 0xe4, 0x3d, 0xea, 0xb6, 0x77, 0x22, 0xa5, 0x3e, 0x8d, 0xe8, 0x90, 0x26, 0xad, 0x22, 0x77, + 0x04, 0xcd, 0x7a, 0xe5, 0xf0, 0xa0, 0x3a, 0x29, 0xff, 0xa0, 0xe2, 0x44, 0x6e, 0xc2, 0x8c, 0x38, + 0xa7, 0x6e, 0xb8, 0x5e, 0x7b, 0xc3, 0xf7, 0x84, 0x57, 0x55, 0xb9, 0xfe, 0x94, 0xda, 0xf0, 0x1b, + 0x09, 0xe8, 0x7b, 0x07, 0xd5, 0x29, 0xf5, 0x7b, 0x73, 0xbf, 0x47, 0x31, 0x55, 0xdb, 0xfe, 0xbd, + 0x09, 0x00, 0x35, 0x5c, 0x69, 0x8f, 0x7c, 0x04, 0xca, 0x21, 0x8d, 0x04, 0x57, 0x79, 0x93, 0x24, + 0xee, 0xff, 0x54, 0x21, 0xc6, 0x70, 0xb2, 0x0b, 0xc5, 0x9e, 0xd3, 0x0f, 0x69, 0x3e, 0xe7, 0x07, + 0xd9, 0xf9, 0x1b, 0x8c, 0xa2, 0x38, 0x98, 0xf2, 0x9f, 0x28, 0x78, 0x90, 0xaf, 0x58, 0x00, 0x34, + 0xd9, 0x61, 0x23, 0x1b, 0x88, 0x24, 0xcb, 0xb8, 0x4f, 0x59, 0x1b, 0xd4, 0x67, 0x0e, 0x0f, 0xaa, + 
0x60, 0x74, 0xbd, 0xc1, 0x96, 0xdc, 0x83, 0x92, 0xa3, 0xd6, 0xfc, 0xf1, 0xf3, 0x58, 0xf3, 0xf9, + 0x79, 0x51, 0x0f, 0x5a, 0xcd, 0x8c, 0x7c, 0xcd, 0x82, 0x99, 0x90, 0x46, 0xb2, 0xab, 0xd8, 0xca, + 0x23, 0x15, 0xde, 0x11, 0x07, 0x5d, 0x23, 0x41, 0x53, 0xac, 0xa0, 0xc9, 0x32, 0x4c, 0xf1, 0x55, + 0xa2, 0x5c, 0xa7, 0x4e, 0x8b, 0x06, 0xdc, 0x1c, 0x21, 0x35, 0xa9, 0xd1, 0x45, 0x31, 0x68, 0x6a, + 0x51, 0x8c, 0x32, 0x4c, 0xf1, 0x55, 0xa2, 0xac, 0xbb, 0x41, 0xe0, 0x4b, 0x51, 0x4a, 0x39, 0x89, + 0x62, 0xd0, 0xd4, 0xa2, 0x18, 0x65, 0x98, 0xe2, 0x6b, 0x7f, 0x67, 0x1a, 0x66, 0xd4, 0x44, 0x8a, + 0x35, 0x7b, 0x61, 0xfd, 0x1a, 0xa2, 0xd9, 0x2f, 0x9b, 0x40, 0x4c, 0xe2, 0xb2, 0xca, 0x62, 0xaa, + 0x26, 0x15, 0x7b, 0x5d, 0xb9, 0x61, 0x02, 0x31, 0x89, 0x4b, 0xba, 0x50, 0x0c, 0x23, 0xda, 0x53, + 0x3e, 0x07, 0x23, 0x5e, 0x89, 0xc7, 0xeb, 0x83, 0x61, 0x49, 0x60, 0xe4, 0x51, 0x70, 0xe1, 0x06, + 0xdc, 0x28, 0x61, 0xd3, 0x95, 0x93, 0x23, 0x9f, 0xf9, 0x99, 0x34, 0x17, 0x8b, 0xde, 0x48, 0x96, + 0x61, 0x8a, 0x7d, 0x86, 0xb2, 0x5f, 0x3c, 0x47, 0x65, 0xff, 0xd3, 0x50, 0xea, 0x3a, 0xf7, 0x1b, + 0xfd, 0xa0, 0x7d, 0xf6, 0x43, 0x85, 0xf4, 0x21, 0x15, 0x54, 0x50, 0xd3, 0x23, 0x5f, 0xb2, 0x8c, + 0x25, 0x47, 0x38, 0x18, 0xdc, 0xc9, 0x77, 0xc9, 0xd1, 0x7b, 0xe5, 0xd0, 0xc5, 0x67, 0x40, 0xf5, + 0x2e, 0x3d, 0x70, 0xd5, 0x9b, 0xa9, 0x91, 0x62, 0x82, 0x68, 0x35, 0xb2, 0x7c, 0xae, 0x6a, 0xe4, + 0x72, 0x82, 0x19, 0xa6, 0x98, 0x73, 0x79, 0xc4, 0x9c, 0xd3, 0xf2, 0xc0, 0xb9, 0xca, 0xd3, 0x48, + 0x30, 0xc3, 0x14, 0xf3, 0xe1, 0xe7, 0xcd, 0xca, 0xf9, 0x9c, 0x37, 0xa7, 0x72, 0x38, 0x6f, 0x1e, + 0xad, 0x8a, 0x4f, 0x8f, 0xaa, 0x8a, 0x93, 0x1b, 0x40, 0x5a, 0xfb, 0x9e, 0xd3, 0x75, 0x9b, 0x72, + 0xb1, 0xe4, 0xdb, 0xe6, 0x0c, 0xb7, 0x47, 0x2c, 0xc8, 0x85, 0x8c, 0xac, 0x0c, 0x60, 0x60, 0x46, + 0x2d, 0x12, 0x41, 0xa9, 0xa7, 0x34, 0xae, 0xd9, 0x3c, 0x46, 0xbf, 0xd2, 0xc0, 0x84, 0xdf, 0x08, + 0x9b, 0x78, 0xaa, 0x04, 0x35, 0x27, 0xb2, 0x06, 0x97, 0xba, 0xae, 0xb7, 0xe1, 0xb7, 0xc2, 0x0d, + 0x1a, 0x48, 0x6b, 0x4b, 0x83, 0x46, 
0xf3, 0x73, 0xbc, 0x6d, 0xf8, 0x09, 0x7a, 0x3d, 0x03, 0x8e, + 0x99, 0xb5, 0xec, 0xff, 0x65, 0xc1, 0xdc, 0x72, 0xc7, 0xef, 0xb7, 0xee, 0x38, 0x51, 0x73, 0x47, + 0xb8, 0x29, 0x90, 0x57, 0xa0, 0xe4, 0x7a, 0x11, 0x0d, 0xf6, 0x9c, 0x8e, 0xdc, 0x9f, 0x6c, 0x65, + 0x3e, 0x5d, 0x95, 0xe5, 0xef, 0x1d, 0x54, 0x67, 0x56, 0xfa, 0x01, 0xb7, 0x52, 0x8b, 0xd5, 0x0a, + 0x75, 0x1d, 0xf2, 0x1d, 0x0b, 0x2e, 0x08, 0x47, 0x87, 0x15, 0x27, 0x72, 0x5e, 0xef, 0xd3, 0xc0, + 0xa5, 0xca, 0xd5, 0x61, 0xc4, 0x85, 0x2a, 0x2d, 0xab, 0x62, 0xb0, 0x1f, 0x2b, 0xea, 0xeb, 0x69, + 0xce, 0x38, 0x28, 0x8c, 0xfd, 0x4b, 0x05, 0x78, 0x64, 0x28, 0x2d, 0xb2, 0x00, 0x63, 0x6e, 0x4b, + 0x7e, 0x3a, 0x48, 0xba, 0x63, 0xab, 0x2d, 0x1c, 0x73, 0x5b, 0x64, 0x91, 0xeb, 0x9c, 0x01, 0x0d, + 0x43, 0x75, 0xe1, 0x5c, 0xd6, 0xea, 0xa1, 0x2c, 0x45, 0x03, 0x83, 0x54, 0xa1, 0xc8, 0xfd, 0x87, + 0xe5, 0x79, 0x82, 0x6b, 0xb1, 0xdc, 0x55, 0x17, 0x45, 0x39, 0xf9, 0xb2, 0x05, 0x20, 0x04, 0x64, + 0xa7, 0x11, 0xb9, 0x4b, 0x62, 0xbe, 0xcd, 0xc4, 0x28, 0x0b, 0x29, 0xe3, 0xff, 0x68, 0x70, 0x25, + 0x9b, 0x30, 0xc1, 0x14, 0x5a, 0xbf, 0x75, 0xe6, 0x4d, 0x91, 0xdf, 0x44, 0x6d, 0x70, 0x1a, 0x28, + 0x69, 0xb1, 0xb6, 0x0a, 0x68, 0xd4, 0x0f, 0x3c, 0xd6, 0xb4, 0x7c, 0x1b, 0x2c, 0x09, 0x29, 0x50, + 0x97, 0xa2, 0x81, 0x61, 0xff, 0xf3, 0x31, 0xb8, 0x94, 0x25, 0x3a, 0xdb, 0x6d, 0x26, 0x84, 0xb4, + 0xf2, 0x68, 0xfc, 0x53, 0xf9, 0xb7, 0x8f, 0xf4, 0xd9, 0xd1, 0xd7, 0x14, 0xd2, 0x81, 0x52, 0xf2, + 0x25, 0x3f, 0xa5, 0x5b, 0x68, 0xec, 0x8c, 0x2d, 0xa4, 0x29, 0xa7, 0x5a, 0xe9, 0x09, 0x18, 0x0f, + 0x59, 0xcf, 0x17, 0x92, 0xd7, 0x1d, 0xbc, 0x8f, 0x38, 0x84, 0x61, 0xf4, 0x3d, 0x37, 0x92, 0x41, + 0x37, 0x1a, 0xe3, 0xb6, 0xe7, 0x46, 0xc8, 0x21, 0xf6, 0xb7, 0xc7, 0x60, 0x61, 0xf8, 0x47, 0x91, + 0x6f, 0x5b, 0x00, 0x2d, 0x76, 0x5c, 0x09, 0xb9, 0xe7, 0xba, 0xf0, 0x71, 0x72, 0xce, 0xab, 0x0d, + 0x57, 0x14, 0xa7, 0xd8, 0xf9, 0x4e, 0x17, 0x85, 0x68, 0x08, 0x42, 0x9e, 0x53, 0x43, 0x9f, 0x5f, + 0xd5, 0x88, 0xc9, 0xa4, 0xeb, 0xac, 0x6b, 0x08, 0x1a, 0x58, 0xec, 0x3c, 
0xea, 0x39, 0x5d, 0x1a, + 0xf6, 0x1c, 0x1d, 0xc2, 0xc4, 0xcf, 0xa3, 0x37, 0x55, 0x21, 0xc6, 0x70, 0xbb, 0x03, 0x4f, 0x9e, + 0x40, 0xce, 0x9c, 0x22, 0x44, 0xec, 0xff, 0x61, 0xc1, 0xc3, 0xd2, 0xfd, 0xec, 0xff, 0x1b, 0x5f, + 0xc6, 0x3f, 0xb3, 0xe0, 0xd1, 0x21, 0xdf, 0xfc, 0x00, 0x5c, 0x1a, 0xdf, 0x4a, 0xba, 0x34, 0xde, + 0x1e, 0x75, 0x48, 0x67, 0x7e, 0xc7, 0x10, 0xcf, 0xc6, 0x7f, 0x53, 0x80, 0x69, 0xb6, 0x6c, 0xb5, + 0xfc, 0x76, 0x4e, 0x1b, 0xe7, 0x93, 0x50, 0xfc, 0x3c, 0xdb, 0x80, 0xd2, 0x83, 0x8c, 0xef, 0x4a, + 0x28, 0x60, 0xe4, 0x2b, 0x16, 0x4c, 0x7e, 0x5e, 0xee, 0xa9, 0xe2, 0x2c, 0x37, 0xe2, 0x62, 0x98, + 0xf8, 0x86, 0x45, 0xb9, 0x43, 0x8a, 0xc0, 0x13, 0xed, 0xc0, 0xa8, 0xb6, 0x52, 0xc5, 0x99, 0x3c, + 0x03, 0x93, 0xdb, 0x7e, 0xd0, 0xed, 0x77, 0x9c, 0x74, 0xb4, 0xe3, 0x35, 0x51, 0x8c, 0x0a, 0xce, + 0x26, 0xb9, 0xd3, 0x73, 0xdf, 0xa0, 0x41, 0x28, 0xe2, 0x10, 0x12, 0x93, 0xbc, 0xa6, 0x21, 0x68, + 0x60, 0xf1, 0x3a, 0xed, 0x76, 0x40, 0xdb, 0x4e, 0xe4, 0x07, 0x7c, 0xe7, 0x30, 0xeb, 0x68, 0x08, + 0x1a, 0x58, 0x0b, 0x9f, 0x84, 0x29, 0x53, 0xf8, 0x53, 0x05, 0xb1, 0x7c, 0x0a, 0xa4, 0x27, 0x63, + 0x6a, 0x49, 0xb2, 0x4e, 0xb2, 0x24, 0xd9, 0xff, 0x61, 0x0c, 0x0c, 0xeb, 0xd0, 0x03, 0x98, 0xea, + 0x5e, 0x62, 0xaa, 0x8f, 0x68, 0xd9, 0x30, 0x6c, 0x5d, 0xc3, 0x42, 0xfa, 0xf6, 0x52, 0x21, 0x7d, + 0x37, 0x73, 0xe3, 0x78, 0x74, 0x44, 0xdf, 0x0f, 0x2c, 0x78, 0x34, 0x46, 0x1e, 0x34, 0xdc, 0x1e, + 0xbf, 0x6e, 0xbf, 0x08, 0x15, 0x27, 0xae, 0x26, 0x27, 0x96, 0x11, 0x4f, 0xa5, 0x41, 0x68, 0xe2, + 0xc5, 0xb1, 0x20, 0x85, 0x33, 0xc6, 0x82, 0x8c, 0x1f, 0x1d, 0x0b, 0x62, 0xff, 0x78, 0x0c, 0x1e, + 0x1f, 0xfc, 0x32, 0xd3, 0x41, 0xfa, 0xf8, 0x6f, 0x4b, 0xbb, 0x50, 0x8f, 0x9d, 0xd9, 0x85, 0xba, + 0x70, 0x52, 0x17, 0x6a, 0xed, 0xb8, 0x3c, 0x7e, 0xee, 0x8e, 0xcb, 0x0d, 0xb8, 0xac, 0xbc, 0x24, + 0xaf, 0xf9, 0x81, 0x0c, 0x88, 0x50, 0x2b, 0x48, 0xa9, 0xfe, 0xb8, 0xac, 0x72, 0x19, 0xb3, 0x90, + 0x30, 0xbb, 0xae, 0xfd, 0x83, 0x02, 0x5c, 0x8c, 0x9b, 0x7d, 0xd9, 0xf7, 0x5a, 0x2e, 0x77, 0xb4, + 0x79, 0x19, 
0xc6, 0xa3, 0xfd, 0x9e, 0x6a, 0xec, 0xbf, 0xac, 0xc4, 0xd9, 0xdc, 0xef, 0xb1, 0xde, + 0x7e, 0x38, 0xa3, 0x0a, 0x37, 0x9d, 0xf3, 0x4a, 0x64, 0x4d, 0xcf, 0x0e, 0xd1, 0x03, 0x2f, 0x24, + 0x47, 0xf3, 0x7b, 0x07, 0xd5, 0x8c, 0xd4, 0x06, 0x8b, 0x9a, 0x52, 0x72, 0xcc, 0x93, 0xbb, 0x30, + 0xd3, 0x71, 0xc2, 0xe8, 0x76, 0xaf, 0xe5, 0x44, 0x74, 0xd3, 0x95, 0x2e, 0x2c, 0xa7, 0x8b, 0x21, + 0xd1, 0x77, 0xfd, 0x6b, 0x09, 0x4a, 0x98, 0xa2, 0x4c, 0xf6, 0x80, 0xb0, 0x92, 0xcd, 0xc0, 0xf1, + 0x42, 0xf1, 0x55, 0x8c, 0xdf, 0xe9, 0x03, 0x82, 0xf4, 0xd1, 0x79, 0x6d, 0x80, 0x1a, 0x66, 0x70, + 0x20, 0x4f, 0xc1, 0x44, 0x40, 0x9d, 0x50, 0x6f, 0x07, 0x7a, 0xfe, 0x23, 0x2f, 0x45, 0x09, 0x35, + 0x27, 0xd4, 0xc4, 0x31, 0x13, 0xea, 0x8f, 0x2c, 0x98, 0x89, 0xbb, 0xe9, 0x01, 0xa8, 0x1e, 0xdd, + 0xa4, 0xea, 0x71, 0x3d, 0xaf, 0x25, 0x71, 0x88, 0xb6, 0xf1, 0xa7, 0x93, 0xe6, 0xf7, 0xf1, 0xa8, + 0x85, 0x2f, 0x98, 0x4e, 0xec, 0x56, 0x1e, 0xa1, 0x64, 0x09, 0x6d, 0xef, 0x48, 0xef, 0x75, 0xa6, + 0xeb, 0xb4, 0xa4, 0x1e, 0x23, 0x87, 0xbd, 0xd6, 0x75, 0x94, 0x7e, 0x93, 0xa5, 0xeb, 0xa8, 0x3a, + 0xe4, 0x36, 0x3c, 0xdc, 0x0b, 0x7c, 0x1e, 0x5c, 0xbf, 0x42, 0x9d, 0x56, 0xc7, 0xf5, 0xa8, 0x32, + 0xf3, 0x08, 0x57, 0x93, 0x47, 0x0f, 0x0f, 0xaa, 0x0f, 0x6f, 0x64, 0xa3, 0xe0, 0xb0, 0xba, 0xc9, + 0xf0, 0xcc, 0xf1, 0x13, 0x84, 0x67, 0xfe, 0x82, 0x36, 0xa6, 0xea, 0x48, 0x80, 0xcf, 0xe4, 0xd5, + 0x95, 0x59, 0x31, 0x01, 0x7a, 0x48, 0xd5, 0x24, 0x53, 0xd4, 0xec, 0x87, 0x5b, 0xec, 0x26, 0xce, + 0x68, 0xb1, 0x8b, 0x83, 0x3f, 0x26, 0xdf, 0xcf, 0xe0, 0x8f, 0xd2, 0x07, 0x2a, 0xf8, 0xe3, 0x3b, + 0x16, 0x5c, 0x74, 0x06, 0xc3, 0xae, 0xf3, 0x31, 0x1e, 0x67, 0xc4, 0x73, 0xd7, 0x1f, 0x95, 0x42, + 0x66, 0x45, 0xb7, 0x63, 0x96, 0x28, 0xf6, 0xbb, 0x45, 0x98, 0x4b, 0x2b, 0x49, 0xe7, 0x1f, 0x9f, + 0xfa, 0xb7, 0x2d, 0x98, 0x53, 0x13, 0x5c, 0xf0, 0xd4, 0x47, 0x8c, 0xb5, 0x9c, 0xd6, 0x15, 0xa1, + 0xee, 0xe9, 0xb4, 0x21, 0x9b, 0x29, 0x6e, 0x38, 0xc0, 0x9f, 0xbc, 0x09, 0x15, 0x7d, 0xab, 0x72, + 0xa6, 0x60, 0x55, 0x1e, 0x4f, 0x59, 0x8b, 0x49, 
0xa0, 0x49, 0x8f, 0xbc, 0x6b, 0x01, 0x34, 0xd5, + 0x4e, 0x9c, 0x53, 0x28, 0x50, 0x86, 0xb6, 0x10, 0xeb, 0xf3, 0xba, 0x28, 0x44, 0x83, 0x31, 0xf9, + 0x25, 0x7e, 0x9f, 0xa2, 0x47, 0x82, 0x48, 0x47, 0x32, 0xb2, 0xdb, 0xfb, 0x11, 0xba, 0x73, 0xac, + 0xed, 0x19, 0xa0, 0x10, 0x13, 0x42, 0xd8, 0x2f, 0x83, 0x76, 0x54, 0x66, 0x2b, 0x2b, 0x77, 0x55, + 0xde, 0x70, 0xa2, 0x1d, 0x39, 0x04, 0xf5, 0xca, 0x7a, 0x4d, 0x01, 0x30, 0xc6, 0xb1, 0x3f, 0x07, + 0x33, 0xaf, 0x06, 0x4e, 0x6f, 0xc7, 0xe5, 0xf7, 0x16, 0xec, 0x7c, 0xfc, 0x0c, 0x4c, 0x3a, 0xad, + 0x56, 0x56, 0x86, 0x9b, 0x9a, 0x28, 0x46, 0x05, 0x3f, 0xd1, 0x51, 0xd8, 0xfe, 0x3d, 0x0b, 0x48, + 0x7c, 0xf7, 0xeb, 0x7a, 0xed, 0x75, 0x27, 0x6a, 0xee, 0xb0, 0x23, 0xdc, 0x0e, 0x2f, 0xcd, 0x3a, + 0xc2, 0x5d, 0xd7, 0x10, 0x34, 0xb0, 0xc8, 0xdb, 0x50, 0x11, 0xff, 0xde, 0xd0, 0x07, 0xc4, 0xd1, + 0xfd, 0xad, 0xf9, 0x9e, 0xc7, 0x65, 0x12, 0xa3, 0xf0, 0x7a, 0xcc, 0x01, 0x4d, 0x76, 0xac, 0xa9, + 0x56, 0xbd, 0xed, 0x4e, 0xff, 0x7e, 0x6b, 0x2b, 0x6e, 0xaa, 0x5e, 0xe0, 0x6f, 0xbb, 0x1d, 0x9a, + 0x6e, 0xaa, 0x0d, 0x51, 0x8c, 0x0a, 0x7e, 0xb2, 0xa6, 0xfa, 0xd7, 0x16, 0x5c, 0x5a, 0x0d, 0x23, + 0xd7, 0x5f, 0xa1, 0x61, 0xc4, 0x76, 0x3e, 0xb6, 0x3e, 0xf6, 0x3b, 0x27, 0x89, 0x39, 0x58, 0x81, + 0x39, 0x79, 0x0f, 0xdd, 0xdf, 0x0a, 0x69, 0x64, 0x1c, 0x35, 0xf4, 0x3c, 0x5e, 0x4e, 0xc1, 0x71, + 0xa0, 0x06, 0xa3, 0x22, 0x2f, 0xa4, 0x63, 0x2a, 0x85, 0x24, 0x95, 0x46, 0x0a, 0x8e, 0x03, 0x35, + 0xec, 0xef, 0x17, 0xe0, 0x22, 0xff, 0x8c, 0x54, 0xbc, 0xd0, 0x37, 0x87, 0xc5, 0x0b, 0x8d, 0x38, + 0x95, 0x39, 0xaf, 0x33, 0x44, 0x0b, 0xfd, 0x2d, 0x0b, 0x66, 0x5b, 0xc9, 0x96, 0xce, 0xc7, 0x2e, + 0x97, 0xd5, 0x87, 0xc2, 0xed, 0x2e, 0x55, 0x88, 0x69, 0xfe, 0xe4, 0x97, 0x2d, 0x98, 0x4d, 0x8a, + 0xa9, 0x56, 0xf7, 0x73, 0x68, 0x24, 0xed, 0x27, 0x9f, 0x2c, 0x0f, 0x31, 0x2d, 0x82, 0xfd, 0xfb, + 0x63, 0xb2, 0x4b, 0xcf, 0x23, 0x18, 0x86, 0xdc, 0x83, 0x72, 0xd4, 0x09, 0x45, 0xa1, 0xfc, 0xda, + 0x11, 0x0f, 0xad, 0x9b, 0x6b, 0x0d, 0xe1, 0x02, 0x12, 0xeb, 0x95, 0xb2, 0x84, 0xe9, 
0xc7, 0x8a, + 0x17, 0x67, 0xdc, 0xec, 0x49, 0xc6, 0xb9, 0x9c, 0x96, 0x37, 0x97, 0x37, 0xd2, 0x8c, 0x65, 0x09, + 0x63, 0xac, 0x78, 0xd9, 0xbf, 0x6e, 0x41, 0xf9, 0x86, 0xaf, 0xd6, 0x91, 0x9f, 0xc9, 0xc1, 0x16, + 0xa5, 0x55, 0x56, 0xad, 0xb4, 0xc4, 0xa7, 0xa0, 0x57, 0x12, 0x96, 0xa8, 0xc7, 0x0c, 0xda, 0x8b, + 0x3c, 0xd1, 0x1f, 0x23, 0x75, 0xc3, 0xdf, 0x1a, 0x6a, 0x3e, 0xfe, 0x95, 0x22, 0x4c, 0xbf, 0xe6, + 0xec, 0x53, 0x2f, 0x72, 0x4e, 0xbf, 0x49, 0xbc, 0x08, 0x15, 0xa7, 0xc7, 0xef, 0x32, 0x8d, 0x63, + 0x48, 0x6c, 0xdc, 0x89, 0x41, 0x68, 0xe2, 0xc5, 0x0b, 0x9a, 0x88, 0x4c, 0xc9, 0x5a, 0x8a, 0x96, + 0x53, 0x70, 0x1c, 0xa8, 0x41, 0x6e, 0x00, 0x91, 0xd1, 0xdc, 0xb5, 0x66, 0xd3, 0xef, 0x7b, 0x62, + 0x49, 0x13, 0x76, 0x1f, 0x7d, 0x1e, 0x5e, 0x1f, 0xc0, 0xc0, 0x8c, 0x5a, 0xe4, 0xb3, 0x30, 0xdf, + 0xe4, 0x94, 0xe5, 0xe9, 0xc8, 0xa4, 0x28, 0x4e, 0xc8, 0x3a, 0xd6, 0x63, 0x79, 0x08, 0x1e, 0x0e, + 0xa5, 0xc0, 0x24, 0x0d, 0x23, 0x3f, 0x70, 0xda, 0xd4, 0xa4, 0x3b, 0x91, 0x94, 0xb4, 0x31, 0x80, + 0x81, 0x19, 0xb5, 0xc8, 0x17, 0xa1, 0x1c, 0xed, 0x04, 0x34, 0xdc, 0xf1, 0x3b, 0x2d, 0xe9, 0x7b, + 0x32, 0xa2, 0x31, 0x50, 0xf6, 0xfe, 0xa6, 0xa2, 0x6a, 0x0c, 0x6f, 0x55, 0x84, 0x31, 0x4f, 0x12, + 0xc0, 0x44, 0xd8, 0xf4, 0x7b, 0x34, 0x94, 0xa7, 0x8a, 0x1b, 0xb9, 0x70, 0xe7, 0xc6, 0x2d, 0xc3, + 0x0c, 0xc9, 0x39, 0xa0, 0xe4, 0x64, 0xff, 0xee, 0x18, 0x4c, 0x99, 0x88, 0x27, 0x58, 0x9b, 0xbe, + 0x62, 0xc1, 0x54, 0xd3, 0xf7, 0xa2, 0xc0, 0xef, 0xc4, 0x59, 0x0a, 0x46, 0xd7, 0x28, 0x18, 0xa9, + 0x15, 0x1a, 0x39, 0x6e, 0xc7, 0xb0, 0xd6, 0x19, 0x6c, 0x30, 0xc1, 0x94, 0x7c, 0xc3, 0x82, 0xd9, + 0xd8, 0x55, 0x31, 0xb6, 0xf5, 0xe5, 0x2a, 0x88, 0x5e, 0xea, 0xaf, 0x26, 0x39, 0x61, 0x9a, 0xb5, + 0xbd, 0x05, 0x73, 0xe9, 0xde, 0x66, 0x4d, 0xd9, 0x73, 0xe4, 0x5c, 0x2f, 0xc4, 0x4d, 0xb9, 0xe1, + 0x84, 0x21, 0x72, 0x08, 0x79, 0x16, 0x4a, 0x5d, 0x27, 0x68, 0xbb, 0x9e, 0xd3, 0xe1, 0xad, 0x58, + 0x30, 0x16, 0x24, 0x59, 0x8e, 0x1a, 0xc3, 0xfe, 0x18, 0x4c, 0xad, 0x3b, 0x5e, 0x9b, 0xb6, 0xe4, + 0x3a, 0x7c, 0x7c, 0x38, 
0xe6, 0x9f, 0x8c, 0x43, 0xc5, 0x38, 0x3e, 0x9e, 0xff, 0x39, 0x2b, 0x91, + 0x7d, 0xa7, 0x90, 0x63, 0xf6, 0x9d, 0x4f, 0x03, 0x6c, 0xbb, 0x9e, 0x1b, 0xee, 0x9c, 0x31, 0xaf, + 0x0f, 0xbf, 0x9b, 0xbf, 0xa6, 0x29, 0xa0, 0x41, 0x2d, 0xbe, 0x00, 0x2d, 0x1e, 0x91, 0x22, 0xef, + 0x5d, 0xcb, 0xd8, 0x6e, 0x26, 0xf2, 0x70, 0xf8, 0x30, 0x3a, 0x66, 0x51, 0x6d, 0x3f, 0xe2, 0x6e, + 0xea, 0xa8, 0x5d, 0x69, 0x13, 0x4a, 0x01, 0x0d, 0xfb, 0x5d, 0x7a, 0xa6, 0x0c, 0x3c, 0xdc, 0xf5, + 0x06, 0x65, 0x7d, 0xd4, 0x94, 0x16, 0x5e, 0x86, 0xe9, 0x84, 0x08, 0xa7, 0xba, 0x61, 0xf2, 0x21, + 0xd3, 0x46, 0x71, 0x96, 0xfb, 0x26, 0xd6, 0x17, 0x1d, 0x23, 0xf3, 0x8e, 0xee, 0x0b, 0xe1, 0x60, + 0x25, 0x60, 0xf6, 0x8f, 0x27, 0x40, 0xfa, 0x30, 0x9c, 0x60, 0xb9, 0x32, 0x6f, 0x2e, 0xc7, 0xce, + 0x70, 0x73, 0x79, 0x03, 0xa6, 0x5c, 0xcf, 0x8d, 0x5c, 0xa7, 0xc3, 0xed, 0x4f, 0x72, 0x3b, 0x55, + 0x1e, 0xe8, 0x53, 0xab, 0x06, 0x2c, 0x83, 0x4e, 0xa2, 0x2e, 0x79, 0x1d, 0x8a, 0x7c, 0xbf, 0x91, + 0x03, 0xf8, 0xf4, 0x8e, 0x16, 0xdc, 0xc7, 0x46, 0x84, 0xa5, 0x09, 0x4a, 0xfc, 0xf0, 0x21, 0x52, + 0x0f, 0xe9, 0xe3, 0xb7, 0x1c, 0xc7, 0xf1, 0xe1, 0x23, 0x05, 0xc7, 0x81, 0x1a, 0x8c, 0xca, 0xb6, + 0xe3, 0x76, 0xfa, 0x01, 0x8d, 0xa9, 0x4c, 0x24, 0xa9, 0x5c, 0x4b, 0xc1, 0x71, 0xa0, 0x06, 0xd9, + 0x86, 0x29, 0x59, 0x26, 0xdc, 0xe6, 0x26, 0xcf, 0xf8, 0x95, 0xdc, 0x3d, 0xf2, 0x9a, 0x41, 0x09, + 0x13, 0x74, 0x49, 0x1f, 0x2e, 0xb8, 0x5e, 0xd3, 0xf7, 0x9a, 0x9d, 0x7e, 0xe8, 0xee, 0xd1, 0x38, + 0x26, 0xec, 0x2c, 0xcc, 0x2e, 0x1f, 0x1e, 0x54, 0x2f, 0xac, 0xa6, 0xc9, 0xe1, 0x20, 0x07, 0xf2, + 0x25, 0x0b, 0x2e, 0x37, 0x7d, 0x2f, 0xe4, 0xa9, 0x2b, 0xf6, 0xe8, 0xd5, 0x20, 0xf0, 0x03, 0xc1, + 0xbb, 0x7c, 0x46, 0xde, 0xdc, 0xec, 0xb9, 0x9c, 0x45, 0x12, 0xb3, 0x39, 0x91, 0xb7, 0xa0, 0xd4, + 0x0b, 0xfc, 0x3d, 0xb7, 0x45, 0x03, 0xe9, 0x82, 0xb9, 0x96, 0x47, 0x3e, 0x9f, 0x0d, 0x49, 0x33, + 0x5e, 0x7a, 0x54, 0x09, 0x6a, 0x7e, 0xf6, 0xff, 0xa9, 0xc0, 0x4c, 0x12, 0x9d, 0xfc, 0x1c, 0x40, + 0x2f, 0xf0, 0xbb, 0x34, 0xda, 0xa1, 0x3a, 0xb6, 0xe7, 0xe6, 
0xa8, 0x19, 0x5b, 0x14, 0x3d, 0xe5, + 0xb6, 0xc4, 0x96, 0x8b, 0xb8, 0x14, 0x0d, 0x8e, 0x24, 0x80, 0xc9, 0x5d, 0xb1, 0xed, 0x4a, 0x2d, + 0xe4, 0xb5, 0x5c, 0x74, 0x26, 0xc9, 0x99, 0x07, 0xa5, 0xc8, 0x22, 0x54, 0x8c, 0xc8, 0x16, 0x14, + 0xee, 0xd1, 0xad, 0x7c, 0xd2, 0x05, 0xdc, 0xa1, 0xf2, 0x34, 0x53, 0x9f, 0x3c, 0x3c, 0xa8, 0x16, + 0xee, 0xd0, 0x2d, 0x64, 0xc4, 0xd9, 0x77, 0xb5, 0x84, 0xef, 0x82, 0x5c, 0x2a, 0x5e, 0xcb, 0xd1, + 0x11, 0x42, 0x7c, 0x97, 0x2c, 0x42, 0xc5, 0x88, 0xbc, 0x05, 0xe5, 0x7b, 0xce, 0x1e, 0xdd, 0x0e, + 0x7c, 0x2f, 0x92, 0xbe, 0x72, 0x23, 0x86, 0x7b, 0xdc, 0x51, 0xe4, 0x24, 0x5f, 0xbe, 0xbd, 0xeb, + 0x42, 0x8c, 0xd9, 0x91, 0x3d, 0x28, 0x79, 0xf4, 0x1e, 0xd2, 0x8e, 0xdb, 0xcc, 0x27, 0xbc, 0xe2, + 0xa6, 0xa4, 0x26, 0x39, 0xf3, 0x7d, 0x4f, 0x95, 0xa1, 0xe6, 0xc5, 0xfa, 0xf2, 0xae, 0xbf, 0x25, + 0x17, 0xaa, 0x11, 0xfb, 0x52, 0x9f, 0x4c, 0x45, 0x5f, 0xde, 0xf0, 0xb7, 0x90, 0x11, 0x67, 0x73, + 0xa4, 0xa9, 0x1d, 0xb5, 0xe4, 0x32, 0x75, 0x33, 0x5f, 0x07, 0x35, 0x31, 0x47, 0xe2, 0x52, 0x34, + 0x38, 0xb2, 0xb6, 0x6d, 0x4b, 0x63, 0xa5, 0x5c, 0xa8, 0x46, 0x6c, 0xdb, 0xa4, 0xe9, 0x53, 0xb4, + 0xad, 0x2a, 0x43, 0xcd, 0x8b, 0xf1, 0x75, 0xa5, 0xe5, 0x2f, 0x9f, 0xa5, 0x2a, 0x69, 0x47, 0x14, + 0x7c, 0x55, 0x19, 0x6a, 0x5e, 0xac, 0xbd, 0xc3, 0xdd, 0xfd, 0x7b, 0x4e, 0x67, 0xd7, 0xf5, 0xda, + 0x32, 0x56, 0x75, 0xd4, 0x34, 0xdc, 0xbb, 0xfb, 0x77, 0x04, 0x3d, 0xb3, 0xbd, 0xe3, 0x52, 0x34, + 0x38, 0x92, 0xbf, 0x6f, 0xc1, 0x44, 0xaf, 0xd3, 0x6f, 0xbb, 0xde, 0xfc, 0x54, 0x1e, 0x4e, 0x4c, + 0xc9, 0x25, 0x77, 0x71, 0x83, 0x93, 0x16, 0x8a, 0xe2, 0x4f, 0x6a, 0xbf, 0x4b, 0x5e, 0xf8, 0xf5, + 0x3f, 0xae, 0xce, 0x53, 0xaf, 0xe9, 0xb7, 0x5c, 0xaf, 0xbd, 0x74, 0x37, 0xf4, 0xbd, 0x45, 0x74, + 0xee, 0x29, 0x1d, 0x5d, 0xca, 0xb4, 0xf0, 0x09, 0xa8, 0x18, 0x24, 0x8e, 0x53, 0xf4, 0xa6, 0x4c, + 0x45, 0xef, 0xd7, 0x27, 0x60, 0xca, 0x4c, 0xbe, 0x79, 0x02, 0xed, 0x4b, 0x9f, 0x38, 0xc6, 0x4e, + 0x73, 0xe2, 0x60, 0x47, 0x4c, 0xe3, 0x82, 0x4b, 0x99, 0xb7, 0x56, 0x73, 0x53, 0xb8, 0xe3, 0x23, + 
0xa6, 0x51, 0x18, 0x62, 0x82, 0xe9, 0x29, 0x7c, 0x5e, 0x98, 0xda, 0x2a, 0x14, 0xbb, 0x62, 0x52, + 0x6d, 0x4d, 0xa8, 0x6a, 0xcf, 0x01, 0xc4, 0x59, 0x22, 0xe5, 0xc5, 0xa7, 0xd6, 0x87, 0x8d, 0xec, + 0x95, 0x06, 0x16, 0x79, 0x0a, 0x26, 0x98, 0xea, 0x43, 0x5b, 0x32, 0x94, 0x5e, 0x9f, 0xe3, 0xaf, + 0xf1, 0x52, 0x94, 0x50, 0xf2, 0x12, 0xd3, 0x52, 0x63, 0x85, 0x45, 0x46, 0xc8, 0x5f, 0x8a, 0xb5, + 0xd4, 0x18, 0x86, 0x09, 0x4c, 0x26, 0x3a, 0x65, 0xfa, 0x05, 0x5f, 0x1b, 0x0c, 0xd1, 0xb9, 0xd2, + 0x81, 0x02, 0xc6, 0xed, 0x4a, 0x29, 0x7d, 0x84, 0xcf, 0xe9, 0xa2, 0x61, 0x57, 0x4a, 0xc1, 0x71, + 0xa0, 0x06, 0xfb, 0x18, 0x79, 0x67, 0x5b, 0x11, 0x0e, 0xd3, 0x43, 0x6e, 0x5b, 0xbf, 0x6a, 0x9e, + 0xb5, 0x72, 0x9c, 0x43, 0x62, 0xd4, 0x9e, 0xfc, 0xb0, 0x35, 0xda, 0xb1, 0xe8, 0x73, 0x30, 0x93, + 0xdc, 0x85, 0x72, 0xbf, 0xf9, 0xf8, 0xda, 0x38, 0x5c, 0xbc, 0xd9, 0x76, 0xbd, 0x74, 0xa2, 0xb3, + 0xac, 0x57, 0x0d, 0xac, 0x53, 0xbf, 0x6a, 0xa0, 0x63, 0xf2, 0xe4, 0x9b, 0x01, 0xd9, 0x31, 0x79, + 0xea, 0x01, 0x87, 0x24, 0x2e, 0xf9, 0x23, 0x0b, 0x1e, 0x73, 0x5a, 0xe2, 0x5c, 0xe0, 0x74, 0x64, + 0xa9, 0x91, 0x8c, 0x5b, 0xce, 0xe8, 0x70, 0xc4, 0x5d, 0x7e, 0xf0, 0xe3, 0x17, 0x6b, 0x47, 0x70, + 0x15, 0x3d, 0xfe, 0x13, 0xf2, 0x0b, 0x1e, 0x3b, 0x0a, 0x15, 0x8f, 0x14, 0x9f, 0xfc, 0x35, 0x98, + 0x4d, 0x7c, 0xb0, 0xb4, 0x84, 0x97, 0xc5, 0x85, 0x45, 0x23, 0x09, 0xc2, 0x34, 0xee, 0xc2, 0x2d, + 0xf8, 0xf0, 0xb1, 0x72, 0x9e, 0x6a, 0xb0, 0x7d, 0xcf, 0x82, 0x29, 0x33, 0x27, 0x11, 0x79, 0x16, + 0x4a, 0x91, 0xbf, 0x4b, 0xbd, 0xdb, 0x81, 0x72, 0xd8, 0xd5, 0x03, 0x7d, 0x93, 0x97, 0xe3, 0x1a, + 0x6a, 0x0c, 0x86, 0xdd, 0xec, 0xb8, 0xd4, 0x8b, 0x56, 0x5b, 0xb2, 0x9b, 0x35, 0xf6, 0xb2, 0x28, + 0x5f, 0x41, 0x8d, 0x21, 0x7c, 0xec, 0xd8, 0xef, 0x06, 0x6d, 0x06, 0x54, 0xb9, 0xf7, 0x1b, 0x3e, + 0x76, 0x31, 0x0c, 0x13, 0x98, 0xc4, 0xd6, 0x26, 0xce, 0xf1, 0xf8, 0x5e, 0x23, 0x65, 0x92, 0xfc, + 0x2d, 0x0b, 0xca, 0xc2, 0x44, 0x8f, 0x74, 0x3b, 0xe5, 0x62, 0x9b, 0x32, 0x22, 0xd4, 0x36, 0x56, + 0xb3, 0x5c, 0x6c, 0x9f, 0x80, 0xf1, 
0x5d, 0xd7, 0x53, 0x5f, 0xa2, 0xb7, 0xa5, 0xd7, 0x5c, 0xaf, + 0x85, 0x1c, 0xa2, 0x37, 0xae, 0xc2, 0xd0, 0x8d, 0x6b, 0x09, 0xca, 0xda, 0xf1, 0x44, 0x2e, 0xff, + 0xda, 0x7a, 0xab, 0x1d, 0x55, 0x30, 0xc6, 0xb1, 0x7f, 0xd5, 0x82, 0x19, 0x1e, 0xc3, 0x1d, 0x9f, + 0x87, 0x5f, 0xd4, 0xbe, 0x60, 0x42, 0xee, 0xc7, 0x93, 0xbe, 0x60, 0xef, 0x1d, 0x54, 0x2b, 0x22, + 0xea, 0x3b, 0xe9, 0x1a, 0xf6, 0x19, 0x69, 0x44, 0xe3, 0x1e, 0x6b, 0x63, 0xa7, 0xb6, 0xf1, 0xc4, + 0x62, 0x2a, 0x22, 0x18, 0xd3, 0xb3, 0xdf, 0x86, 0x29, 0x33, 0x18, 0x8b, 0xbc, 0x08, 0x95, 0x9e, + 0xeb, 0xb5, 0x93, 0x41, 0xbb, 0xfa, 0xa2, 0x61, 0x23, 0x06, 0xa1, 0x89, 0xc7, 0xab, 0xf9, 0x71, + 0xb5, 0xd4, 0xfd, 0xc4, 0x86, 0x6f, 0x56, 0x8b, 0xff, 0xf0, 0x87, 0x10, 0x32, 0x82, 0xfe, 0x72, + 0x7f, 0x08, 0x21, 0x83, 0xc7, 0xfb, 0xf7, 0x10, 0x42, 0x96, 0x30, 0x7f, 0xbe, 0x1e, 0x42, 0xf8, + 0x69, 0x38, 0x6d, 0x4e, 0x54, 0xb6, 0xd7, 0xdf, 0x33, 0x13, 0x2b, 0xe8, 0x16, 0x97, 0x99, 0x15, + 0x24, 0xd4, 0xfe, 0x9d, 0x02, 0xcc, 0xa5, 0x8f, 0xfc, 0x79, 0x7b, 0x53, 0x90, 0x6f, 0x58, 0x30, + 0xe3, 0x24, 0xf2, 0xcf, 0xe5, 0xf4, 0xaa, 0x52, 0x82, 0xa6, 0x91, 0xff, 0x2c, 0x51, 0x8e, 0x29, + 0xde, 0xe4, 0x2f, 0xc1, 0x64, 0xe4, 0x76, 0xa9, 0xdf, 0x17, 0x86, 0xc0, 0x82, 0x38, 0x90, 0x6f, + 0x8a, 0x22, 0x54, 0x30, 0xb6, 0x28, 0xbb, 0x5c, 0x83, 0x0a, 0xa8, 0xf4, 0x0c, 0x9e, 0x8b, 0x2d, + 0x97, 0xa2, 0x1c, 0x35, 0x06, 0xb9, 0x0f, 0x93, 0xc2, 0xef, 0x42, 0x39, 0xd8, 0xac, 0xe7, 0x64, + 0x9a, 0x10, 0xae, 0x1d, 0x71, 0x17, 0x88, 0xff, 0x21, 0x2a, 0x76, 0xf6, 0xc7, 0xe0, 0x94, 0x49, + 0x62, 0xed, 0xab, 0x40, 0xd0, 0xef, 0x74, 0xb6, 0x9c, 0xe6, 0xee, 0x1d, 0xd7, 0x6b, 0xf9, 0xf7, + 0xf8, 0x52, 0xb4, 0x04, 0xe5, 0x40, 0x86, 0xcc, 0x86, 0x72, 0xd4, 0xe8, 0xb5, 0x4c, 0xc5, 0xd2, + 0x86, 0x18, 0xe3, 0xd8, 0xbf, 0x3f, 0x06, 0x93, 0x32, 0xbe, 0xfb, 0x01, 0x44, 0x26, 0xec, 0x26, + 0xee, 0x83, 0x57, 0x73, 0x09, 0x4b, 0x1f, 0x1a, 0x96, 0x10, 0xa6, 0xc2, 0x12, 0x5e, 0xcb, 0x87, + 0xdd, 0xd1, 0x31, 0x09, 0xdf, 0x2d, 0xc2, 0x6c, 0x2a, 0x5e, 0x3e, 0x95, 
0x4f, 0xda, 0x7a, 0x5f, + 0xf2, 0x49, 0x93, 0x30, 0x91, 0x53, 0x3c, 0x3f, 0x3f, 0xc6, 0xbf, 0x48, 0x2f, 0x9e, 0x97, 0x87, + 0x69, 0xf1, 0x83, 0xe3, 0x61, 0xfa, 0x5f, 0x2c, 0x78, 0x64, 0x68, 0xd6, 0x07, 0x9e, 0x34, 0x2c, + 0x48, 0x42, 0xe5, 0x7a, 0x91, 0x73, 0x6e, 0x1b, 0x7d, 0x77, 0x9c, 0xce, 0xf3, 0x94, 0x66, 0x4f, + 0x5e, 0x80, 0x29, 0xae, 0x9f, 0xb1, 0x95, 0x33, 0xa2, 0x3d, 0x79, 0xf5, 0xc5, 0x2f, 0x41, 0x1a, + 0x46, 0x39, 0x26, 0xb0, 0xec, 0xef, 0x58, 0x30, 0x3f, 0x2c, 0x85, 0xd4, 0x09, 0x0c, 0x33, 0x7f, + 0x35, 0x15, 0xd9, 0x51, 0x1d, 0x88, 0xec, 0x48, 0x99, 0x66, 0x54, 0x10, 0x87, 0x61, 0x15, 0x29, + 0x1c, 0x13, 0xb8, 0xf0, 0x07, 0x05, 0x98, 0x93, 0x22, 0xc6, 0x4a, 0xf1, 0x4b, 0x89, 0x78, 0x94, + 0x9f, 0x48, 0xc5, 0xa3, 0x5c, 0x4a, 0xe3, 0xff, 0x45, 0x30, 0xca, 0x07, 0x2b, 0x18, 0xe5, 0xeb, + 0x45, 0xb8, 0x9c, 0x99, 0x49, 0x8a, 0x7c, 0x2d, 0x63, 0xa7, 0xb8, 0x93, 0x73, 0xca, 0x2a, 0x1d, + 0xb7, 0x7a, 0xbe, 0x11, 0x1c, 0xbf, 0x6c, 0x46, 0x4e, 0x88, 0xd5, 0x7f, 0xfb, 0x1c, 0x92, 0x6f, + 0x9d, 0x36, 0x88, 0xe2, 0xc1, 0xbe, 0xb7, 0xf5, 0xe7, 0x60, 0xa9, 0xff, 0x7a, 0x01, 0x9e, 0x3e, + 0x69, 0xcb, 0x7e, 0x40, 0xa3, 0x0e, 0xc3, 0x44, 0xd4, 0xe1, 0x03, 0x52, 0x6d, 0xce, 0x25, 0x00, + 0xf1, 0x1f, 0x8e, 0xeb, 0x7d, 0x77, 0x70, 0xc2, 0x9e, 0xc8, 0x51, 0x63, 0x92, 0xa9, 0xbe, 0x2a, + 0x2b, 0x79, 0xbc, 0x37, 0x4c, 0x36, 0x44, 0xf1, 0x7b, 0x07, 0xd5, 0x0b, 0x71, 0x82, 0x17, 0x59, + 0x88, 0xaa, 0x12, 0x79, 0x1a, 0x4a, 0x81, 0x80, 0xaa, 0x38, 0x2b, 0xe9, 0xed, 0x22, 0xca, 0x50, + 0x43, 0xc9, 0x17, 0x8d, 0xb3, 0xc2, 0xf8, 0x79, 0xe5, 0x31, 0x3a, 0xca, 0x89, 0xe7, 0x4d, 0x28, + 0x85, 0x2a, 0x75, 0xb6, 0x98, 0x4e, 0xcf, 0x9f, 0x30, 0x7c, 0xcf, 0xd9, 0xa2, 0x1d, 0x95, 0x47, + 0x5b, 0x7c, 0x9f, 0xce, 0xb2, 0xad, 0x49, 0x12, 0x5b, 0x9f, 0xbd, 0xc5, 0x25, 0x03, 0x0c, 0x9e, + 0xbb, 0x49, 0x04, 0x93, 0xf2, 0xfd, 0x5c, 0x79, 0xfb, 0xb9, 0x9e, 0x53, 0x1c, 0x8c, 0xf4, 0x92, + 0xe6, 0x47, 0x5a, 0x65, 0x03, 0x52, 0xac, 0xec, 0x1f, 0x58, 0x50, 0x91, 0x63, 0xe4, 0x01, 0xc4, + 0x31, 0xde, 
0x4d, 0xc6, 0x31, 0x5e, 0xcd, 0x65, 0x09, 0x1f, 0x12, 0xc4, 0x78, 0x17, 0xa6, 0xcc, + 0x9c, 0x8e, 0xe4, 0xd3, 0xc6, 0x16, 0x64, 0x8d, 0x92, 0x25, 0x4d, 0x6d, 0x52, 0xf1, 0xf6, 0x64, + 0xff, 0x46, 0x59, 0xb7, 0x22, 0x3f, 0x38, 0x9b, 0x23, 0xdf, 0x3a, 0x72, 0xe4, 0x9b, 0x03, 0x6f, + 0x2c, 0xff, 0x81, 0xf7, 0x3a, 0x94, 0xd4, 0xb2, 0x28, 0xb5, 0xa9, 0x27, 0x4d, 0xb7, 0x69, 0xa6, + 0x92, 0x31, 0x62, 0xc6, 0x74, 0xe1, 0x07, 0xe0, 0xd8, 0x32, 0xad, 0x96, 0x6b, 0x4d, 0x86, 0xbc, + 0x05, 0x95, 0x7b, 0x7e, 0xb0, 0xdb, 0xf1, 0x1d, 0xfe, 0x5e, 0x01, 0xe4, 0x71, 0x53, 0xaf, 0xad, + 0xcb, 0x22, 0x76, 0xe5, 0x4e, 0x4c, 0x1f, 0x4d, 0x66, 0xa4, 0x06, 0xb3, 0x5d, 0xd7, 0x43, 0xea, + 0xb4, 0x74, 0xb8, 0xe2, 0xb8, 0xc8, 0x15, 0xae, 0x74, 0xfb, 0xf5, 0x24, 0x18, 0xd3, 0xf8, 0xdc, + 0xf2, 0x14, 0x24, 0x4c, 0x1d, 0x32, 0x21, 0xf0, 0xc6, 0xe8, 0x83, 0x31, 0x69, 0x3e, 0x11, 0xc1, + 0x1b, 0xc9, 0x72, 0x4c, 0xf1, 0x26, 0x5f, 0x80, 0x52, 0xa8, 0x5e, 0xa6, 0x2c, 0xe6, 0x78, 0xea, + 0xd1, 0xaf, 0x53, 0xea, 0xae, 0xd4, 0xcf, 0x53, 0x6a, 0x86, 0x64, 0x0d, 0x2e, 0x29, 0xdb, 0x4d, + 0xe2, 0x91, 0xbd, 0x89, 0x38, 0xbf, 0x17, 0x66, 0xc0, 0x31, 0xb3, 0x16, 0xd3, 0x6d, 0x79, 0xae, + 0x54, 0x71, 0x33, 0x6a, 0x5c, 0x26, 0xf2, 0xf9, 0xd7, 0x42, 0x09, 0x3d, 0x2a, 0x1a, 0xb7, 0x34, + 0x42, 0x34, 0x6e, 0x03, 0x2e, 0xa7, 0x41, 0x3c, 0x71, 0x1b, 0xcf, 0x15, 0x67, 0x6c, 0xa1, 0x1b, + 0x59, 0x48, 0x98, 0x5d, 0x97, 0xdc, 0x81, 0x72, 0x40, 0xf9, 0x29, 0xaf, 0xa6, 0x9c, 0xca, 0x4e, + 0xed, 0x3e, 0x8b, 0x8a, 0x00, 0xc6, 0xb4, 0x58, 0xbf, 0x3b, 0xc9, 0xec, 0xdd, 0xaf, 0xe7, 0xf8, + 0x74, 0xb5, 0xec, 0xfb, 0x21, 0x09, 0x15, 0xed, 0x7f, 0x3b, 0x0b, 0xd3, 0x09, 0x03, 0x14, 0x79, + 0x12, 0x8a, 0x3c, 0x93, 0x1d, 0x5f, 0xad, 0x4a, 0xf1, 0x8a, 0x2a, 0x1a, 0x47, 0xc0, 0xc8, 0x2f, + 0x5a, 0x30, 0xdb, 0x4b, 0x5c, 0xa8, 0xa8, 0x85, 0x7c, 0x44, 0xab, 0x6d, 0xf2, 0x96, 0xc6, 0x78, + 0xf7, 0x22, 0xc9, 0x0c, 0xd3, 0xdc, 0xd9, 0x7a, 0x20, 0x7d, 0xd0, 0x3b, 0x34, 0xe0, 0xd8, 0x52, + 0xd1, 0xd3, 0x24, 0x96, 0x93, 0x60, 0x4c, 0xe3, 
0xb3, 0x1e, 0xe6, 0x5f, 0x37, 0xca, 0xf3, 0xa4, + 0x35, 0x45, 0x00, 0x63, 0x5a, 0xe4, 0x15, 0x98, 0x91, 0x49, 0x9b, 0x37, 0xfc, 0xd6, 0x75, 0x27, + 0xdc, 0x91, 0x47, 0x3e, 0x7d, 0x44, 0x5d, 0x4e, 0x40, 0x31, 0x85, 0xcd, 0xbf, 0x2d, 0xce, 0x8c, + 0xcd, 0x09, 0x4c, 0x24, 0x9f, 0x05, 0x59, 0x4e, 0x82, 0x31, 0x8d, 0x4f, 0x9e, 0x35, 0xb6, 0x21, + 0xe1, 0xad, 0xa0, 0x57, 0x83, 0x8c, 0xad, 0xa8, 0x06, 0xb3, 0x7d, 0x7e, 0x42, 0x6e, 0x29, 0xa0, + 0x9c, 0x8f, 0x9a, 0xe1, 0xed, 0x24, 0x18, 0xd3, 0xf8, 0xe4, 0x65, 0x98, 0x0e, 0xd8, 0x62, 0xab, + 0x09, 0x08, 0x17, 0x06, 0x7d, 0x43, 0x8d, 0x26, 0x10, 0x93, 0xb8, 0xe4, 0x55, 0xb8, 0x10, 0xe7, + 0x38, 0x55, 0x04, 0x84, 0x4f, 0x83, 0x4e, 0xb8, 0x57, 0x4b, 0x23, 0xe0, 0x60, 0x1d, 0xf2, 0x37, + 0x60, 0xce, 0x68, 0x89, 0x55, 0xaf, 0x45, 0xef, 0xcb, 0x3c, 0x94, 0xfc, 0x99, 0xab, 0xe5, 0x14, + 0x0c, 0x07, 0xb0, 0xc9, 0x27, 0x61, 0xa6, 0xe9, 0x77, 0x3a, 0x7c, 0x8d, 0x13, 0x4f, 0x52, 0x88, + 0x84, 0x93, 0x22, 0x35, 0x67, 0x02, 0x82, 0x29, 0x4c, 0x72, 0x03, 0x88, 0xbf, 0xc5, 0xd4, 0x2b, + 0xda, 0x7a, 0x95, 0x7a, 0x54, 0x6a, 0x1c, 0xd3, 0xc9, 0x08, 0x98, 0x5b, 0x03, 0x18, 0x98, 0x51, + 0x8b, 0xe7, 0xeb, 0x33, 0x22, 0x86, 0x67, 0xf2, 0x78, 0x4b, 0x33, 0x6d, 0xcf, 0x39, 0x36, 0x5c, + 0x38, 0x80, 0x09, 0x11, 0x90, 0x94, 0x4f, 0xe6, 0x49, 0x33, 0x3b, 0x7d, 0xbc, 0x47, 0x88, 0x52, + 0x94, 0x9c, 0xc8, 0xcf, 0x41, 0x79, 0x4b, 0x3d, 0x55, 0xc2, 0xd3, 0x4d, 0x8e, 0xbc, 0x2f, 0xa6, + 0x5e, 0xdd, 0x89, 0xed, 0x15, 0x1a, 0x80, 0x31, 0x4b, 0xf2, 0x14, 0x54, 0xae, 0x6f, 0xd4, 0xf4, + 0x28, 0xbc, 0xc0, 0x7b, 0x7f, 0x9c, 0x55, 0x41, 0x13, 0xc0, 0x66, 0x98, 0x56, 0xdf, 0x48, 0xf2, + 0x9a, 0x3e, 0x43, 0x1b, 0x63, 0xd8, 0xdc, 0xef, 0x00, 0x1b, 0xf3, 0x17, 0x53, 0xd8, 0xb2, 0x1c, + 0x35, 0x06, 0x79, 0x13, 0x2a, 0x72, 0xbf, 0xe0, 0x6b, 0xd3, 0xa5, 0xb3, 0x45, 0xa3, 0x63, 0x4c, + 0x02, 0x4d, 0x7a, 0xfc, 0xc2, 0x98, 0xbf, 0xe0, 0x40, 0xaf, 0xf5, 0x3b, 0x9d, 0xf9, 0xcb, 0x7c, + 0xdd, 0x8c, 0x2f, 0x8c, 0x63, 0x10, 0x9a, 0x78, 0xe4, 0x79, 0xe5, 0x3f, 0xf6, 0x50, 
0xe2, 0x06, + 0x5d, 0xfb, 0x8f, 0x69, 0xa5, 0x7b, 0x48, 0xc0, 0xca, 0xc3, 0xc7, 0x38, 0x6e, 0x6d, 0xc1, 0x82, + 0xd2, 0xf8, 0x06, 0x27, 0xc9, 0xfc, 0x7c, 0xc2, 0x76, 0xb4, 0x70, 0x67, 0x28, 0x26, 0x1e, 0x41, + 0x85, 0x6c, 0x41, 0xc1, 0xe9, 0x6c, 0xcd, 0x3f, 0x92, 0x87, 0xea, 0x5a, 0x5b, 0xab, 0xcb, 0x11, + 0xc5, 0x9d, 0x4c, 0x6b, 0x6b, 0x75, 0x64, 0xc4, 0x89, 0x0b, 0xe3, 0x4e, 0x67, 0x2b, 0x9c, 0x5f, + 0xe0, 0x73, 0x36, 0x37, 0x26, 0xb1, 0xf1, 0x60, 0xad, 0x1e, 0x22, 0x67, 0x61, 0x7f, 0x69, 0x4c, + 0xdf, 0x12, 0xe9, 0xe4, 0xdf, 0x6f, 0x9b, 0x13, 0x48, 0x1c, 0x77, 0x6e, 0xe5, 0x36, 0x81, 0xa4, + 0x7a, 0x31, 0x3d, 0x74, 0xfa, 0xf4, 0xf4, 0x92, 0x91, 0x4b, 0xd6, 0xb0, 0x64, 0x62, 0x73, 0x71, + 0x7a, 0x4e, 0x2e, 0x18, 0xf6, 0x97, 0x2b, 0xda, 0x0a, 0x9a, 0xf2, 0xbd, 0x0a, 0xa0, 0xe8, 0x86, + 0x91, 0xeb, 0xe7, 0x18, 0xa4, 0x9d, 0xca, 0x08, 0xce, 0x63, 0x40, 0x38, 0x00, 0x05, 0x2b, 0xc6, + 0xd3, 0x6b, 0xbb, 0xde, 0x7d, 0xf9, 0xf9, 0xaf, 0xe7, 0xee, 0x54, 0x25, 0x78, 0x72, 0x00, 0x0a, + 0x56, 0xe4, 0xae, 0x18, 0xd4, 0x85, 0x3c, 0xfa, 0xba, 0xb6, 0x56, 0x4f, 0xf1, 0x4b, 0x0e, 0xee, + 0xbb, 0x50, 0x08, 0xbb, 0xae, 0x54, 0x97, 0x46, 0xe4, 0xd5, 0x58, 0x5f, 0xcd, 0xe2, 0xd5, 0x58, + 0x5f, 0x45, 0xc6, 0x84, 0x7c, 0xd5, 0x02, 0x70, 0xba, 0x5b, 0x4e, 0x18, 0x3a, 0x2d, 0x6d, 0x9d, + 0x19, 0xf1, 0xa5, 0x8f, 0x9a, 0xa6, 0x97, 0x62, 0xcd, 0xdd, 0x88, 0x63, 0x28, 0x1a, 0x9c, 0xc9, + 0x5b, 0x30, 0xe9, 0x88, 0xa7, 0x14, 0xa5, 0x47, 0x7c, 0x3e, 0xef, 0x83, 0xa6, 0x24, 0xe0, 0x66, + 0x1a, 0x09, 0x42, 0xc5, 0x90, 0xf1, 0x8e, 0x02, 0x87, 0x6e, 0xbb, 0xbb, 0xd2, 0x38, 0xd4, 0x18, + 0xf9, 0xb1, 0x0f, 0x46, 0x2c, 0x8b, 0xb7, 0x04, 0xa1, 0x62, 0x48, 0x7e, 0xde, 0x82, 0xe9, 0xae, + 0xe3, 0x39, 0x3a, 0xce, 0x31, 0x9f, 0x68, 0x58, 0x33, 0x72, 0x32, 0xd6, 0x10, 0xd7, 0x4d, 0x46, + 0x98, 0xe4, 0x4b, 0xf6, 0x60, 0xc2, 0xe1, 0x8f, 0xbc, 0xca, 0xa3, 0x18, 0xe6, 0xf1, 0x60, 0x6c, + 0xaa, 0x0d, 0xf8, 0xe2, 0x22, 0x9f, 0x92, 0x95, 0xdc, 0xc8, 0xaf, 0x59, 0x30, 0x29, 0x9c, 0xb5, + 0x99, 0x42, 0xca, 0xbe, 
0xfd, 0x73, 0xe7, 0xf0, 0xb2, 0x80, 0x74, 0x24, 0x97, 0xee, 0x47, 0x1f, + 0xd1, 0x9e, 0xa8, 0xa2, 0xf4, 0x48, 0x57, 0x72, 0x25, 0x1d, 0x53, 0x7d, 0xbb, 0xce, 0xfd, 0xc4, + 0x53, 0x2e, 0xa6, 0xea, 0xbb, 0x9e, 0x82, 0xe1, 0x00, 0xf6, 0xc2, 0x27, 0x61, 0xca, 0x94, 0xe3, + 0x54, 0xee, 0xe8, 0x3f, 0x2a, 0x00, 0xf0, 0xae, 0x12, 0xb9, 0x51, 0xba, 0x3c, 0x91, 0xf2, 0x8e, + 0xdf, 0xca, 0xe9, 0x49, 0x49, 0x23, 0xc5, 0x09, 0xc8, 0xac, 0xc9, 0x3b, 0x7e, 0x0b, 0x25, 0x13, + 0xd2, 0x86, 0xf1, 0x9e, 0x13, 0xed, 0xe4, 0x9f, 0x4f, 0xa5, 0x24, 0x82, 0x84, 0xa3, 0x1d, 0xe4, + 0x0c, 0xc8, 0x3b, 0x56, 0xec, 0xd9, 0x53, 0xc8, 0x23, 0x17, 0x6c, 0xdc, 0x66, 0x8b, 0xd2, 0x97, + 0x27, 0x95, 0x12, 0x35, 0xed, 0xe1, 0xb3, 0xf0, 0xae, 0x05, 0x53, 0x26, 0x6a, 0x46, 0x37, 0xfd, + 0xac, 0xd9, 0x4d, 0x79, 0xb6, 0x87, 0xd9, 0xe3, 0xff, 0xcd, 0x02, 0xc0, 0xbe, 0xd7, 0xe8, 0x77, + 0xbb, 0x4c, 0x6d, 0xd7, 0x5e, 0xf7, 0xd6, 0x89, 0xbd, 0xee, 0xc7, 0x4e, 0xe9, 0x75, 0x5f, 0x38, + 0x95, 0xd7, 0xfd, 0xf8, 0xe9, 0xbd, 0xee, 0x8b, 0xc3, 0xbd, 0xee, 0xed, 0x6f, 0x59, 0x70, 0x61, + 0x60, 0xbf, 0x62, 0x9a, 0x74, 0xe0, 0xfb, 0xd1, 0x10, 0x8f, 0x4d, 0x8c, 0x41, 0x68, 0xe2, 0x91, + 0x15, 0x98, 0x93, 0xcf, 0x86, 0x34, 0x7a, 0x1d, 0x37, 0x33, 0xd7, 0xcd, 0x66, 0x0a, 0x8e, 0x03, + 0x35, 0xec, 0xdf, 0xb1, 0xa0, 0x62, 0x44, 0xc8, 0xb3, 0xef, 0xe0, 0x6e, 0xbb, 0x52, 0x8c, 0xf8, + 0xc5, 0x14, 0x7e, 0xd5, 0x25, 0x60, 0xe2, 0x1a, 0xba, 0x6d, 0x24, 0x95, 0x8f, 0xaf, 0xa1, 0x59, + 0x29, 0x4a, 0xa8, 0x48, 0x17, 0x4e, 0x7b, 0xbc, 0xd1, 0x0b, 0x66, 0xba, 0x70, 0xda, 0x43, 0x0e, + 0xe1, 0xec, 0xd8, 0x91, 0x42, 0x7a, 0xe4, 0x1a, 0x0f, 0xb4, 0x38, 0x41, 0x84, 0x02, 0x46, 0x1e, + 0x87, 0x02, 0xf5, 0x5a, 0xd2, 0xfe, 0xa1, 0x9f, 0x50, 0xbd, 0xea, 0xb5, 0x90, 0x95, 0xdb, 0xb7, + 0x60, 0x4a, 0x78, 0x23, 0xbf, 0x46, 0xf7, 0x4f, 0xfc, 0x26, 0x2b, 0x1b, 0xed, 0xa9, 0x37, 0x59, + 0x59, 0x75, 0x56, 0x6e, 0xff, 0x13, 0x0b, 0x52, 0xaf, 0x08, 0x19, 0x37, 0x30, 0xd6, 0xd0, 0x1b, + 0x18, 0xd3, 0x6a, 0x3f, 0x76, 0xa4, 0xd5, 0xfe, 0x06, 0x90, 
0x2e, 0x9b, 0x0a, 0xc9, 0x85, 0xb6, + 0x90, 0x7c, 0xda, 0x61, 0x7d, 0x00, 0x03, 0x33, 0x6a, 0xd9, 0xff, 0x58, 0x08, 0x6b, 0xbe, 0x2b, + 0x74, 0x7c, 0x03, 0xf4, 0xa1, 0xc8, 0x49, 0x49, 0xfb, 0xdb, 0x88, 0xb6, 0xeb, 0xc1, 0xbc, 0x56, + 0x71, 0x47, 0xca, 0x29, 0xcf, 0xb9, 0xd9, 0x7f, 0x20, 0x64, 0x35, 0x1e, 0x1e, 0x3a, 0x81, 0xac, + 0xdd, 0xa4, 0xac, 0xd7, 0xf3, 0x5a, 0x2b, 0xb3, 0x65, 0x24, 0x8b, 0x00, 0x3d, 0x1a, 0x34, 0xa9, + 0x17, 0xa9, 0x38, 0xa1, 0xa2, 0x8c, 0x58, 0xd5, 0xa5, 0x68, 0x60, 0xd8, 0xdf, 0x64, 0x13, 0x28, + 0x7e, 0xad, 0x98, 0x3c, 0x9d, 0x76, 0x75, 0x4d, 0x4f, 0x0e, 0xed, 0xe9, 0x6a, 0x44, 0x8f, 0x8c, + 0x1d, 0x13, 0x3d, 0xf2, 0x0c, 0x4c, 0x06, 0x7e, 0x87, 0xd6, 0x02, 0x2f, 0xed, 0xa3, 0x83, 0xac, + 0x18, 0x6f, 0xa2, 0x82, 0xdb, 0xbf, 0x62, 0xc1, 0x5c, 0x3a, 0xbc, 0x2d, 0x77, 0xff, 0x5b, 0x33, + 0x06, 0xbf, 0x70, 0xfa, 0x18, 0x7c, 0xfb, 0x1d, 0x26, 0x64, 0xe4, 0x36, 0x77, 0x5d, 0x4f, 0x84, + 0xad, 0xb3, 0x96, 0x7b, 0x06, 0x26, 0xa9, 0x7c, 0x75, 0x55, 0x98, 0x91, 0xb5, 0x90, 0xea, 0xb1, + 0x55, 0x05, 0x27, 0x35, 0x98, 0x55, 0x97, 0x67, 0xca, 0xf6, 0x2f, 0xd2, 0x6d, 0x68, 0x5b, 0xe3, + 0x4a, 0x12, 0x8c, 0x69, 0x7c, 0xfb, 0x8b, 0x50, 0x31, 0x36, 0x25, 0xbe, 0x7e, 0xdf, 0x77, 0x9a, + 0x51, 0x7a, 0xdd, 0xbb, 0xca, 0x0a, 0x51, 0xc0, 0xf8, 0x15, 0x85, 0x88, 0xbe, 0x49, 0xad, 0x7b, + 0x32, 0xe6, 0x46, 0x42, 0x19, 0xb1, 0x80, 0xb6, 0xe9, 0x7d, 0x95, 0xf3, 0x5f, 0x11, 0x43, 0x56, + 0x88, 0x02, 0x66, 0x3f, 0x0b, 0x25, 0x95, 0x14, 0x89, 0x67, 0x16, 0x51, 0xe6, 0x73, 0x33, 0xb3, + 0x88, 0x1f, 0x44, 0xc8, 0x21, 0xf6, 0x1b, 0x50, 0x52, 0xb9, 0x9b, 0x8e, 0xc7, 0x66, 0x4b, 0x51, + 0xe8, 0xb9, 0xd7, 0xfd, 0x30, 0x52, 0x09, 0xa7, 0xc4, 0x0d, 0xdf, 0xcd, 0x55, 0x5e, 0x86, 0x1a, + 0x6a, 0xff, 0x99, 0x05, 0x95, 0xcd, 0xcd, 0x35, 0x7d, 0xf0, 0x47, 0x78, 0x28, 0x14, 0x2d, 0x54, + 0xdb, 0x8e, 0xa8, 0xe9, 0x4a, 0x20, 0x16, 0xbe, 0x85, 0xc3, 0x83, 0xea, 0x43, 0x8d, 0x4c, 0x0c, + 0x1c, 0x52, 0x93, 0xac, 0xc2, 0x45, 0x13, 0x22, 0x13, 0x01, 0xc8, 0x35, 0x92, 0x3f, 0xd3, 0xdb, + 
0x18, 0x04, 0x63, 0x56, 0x9d, 0x34, 0x29, 0xb9, 0xdd, 0x9b, 0x2f, 0xfe, 0x36, 0x06, 0xc1, 0x98, + 0x55, 0xc7, 0x7e, 0x1e, 0x66, 0x53, 0x77, 0xdc, 0x27, 0x48, 0xc0, 0xf2, 0xbb, 0x05, 0x98, 0x32, + 0xaf, 0x3a, 0x4f, 0xb0, 0x7e, 0x9d, 0x7c, 0x5b, 0xc8, 0xb8, 0x9e, 0x2c, 0x9c, 0xf2, 0x7a, 0xd2, + 0xbc, 0x0f, 0x1e, 0x3f, 0xdf, 0xfb, 0xe0, 0x62, 0x3e, 0xf7, 0xc1, 0x86, 0xdf, 0xc2, 0xc4, 0x83, + 0xf3, 0x5b, 0xf8, 0xed, 0x22, 0xcc, 0x24, 0x33, 0x7a, 0x9e, 0xa0, 0x27, 0x9f, 0x1d, 0xe8, 0xc9, + 0x53, 0xde, 0x87, 0x14, 0x46, 0xbd, 0x0f, 0x19, 0x1f, 0xf5, 0x3e, 0xa4, 0x78, 0x86, 0xfb, 0x90, + 0xc1, 0xdb, 0x8c, 0x89, 0x13, 0xdf, 0x66, 0x7c, 0x4a, 0xbb, 0x78, 0x4e, 0x26, 0x5c, 0x80, 0x62, + 0x17, 0x4f, 0x92, 0xec, 0x86, 0x65, 0xbf, 0x95, 0xe9, 0x9a, 0x5a, 0x3a, 0xc6, 0xee, 0x1b, 0x64, + 0x7a, 0x64, 0x9e, 0xfe, 0xca, 0xf5, 0xa1, 0x53, 0x78, 0x63, 0xbe, 0x08, 0x15, 0x39, 0x9e, 0xb8, + 0xf2, 0x0d, 0x49, 0xc5, 0xbd, 0x11, 0x83, 0xd0, 0xc4, 0xe3, 0x0f, 0xf6, 0xc7, 0x13, 0x84, 0xdf, + 0xcc, 0x55, 0x52, 0x0f, 0xf6, 0x27, 0xc1, 0x98, 0xc6, 0xb7, 0xbf, 0x00, 0x97, 0x33, 0x4d, 0x30, + 0xdc, 0xfc, 0xcd, 0xf5, 0x42, 0xda, 0x92, 0x08, 0x86, 0x18, 0xa9, 0x87, 0x3e, 0x16, 0xee, 0x0c, + 0xc5, 0xc4, 0x23, 0xa8, 0xd8, 0xbf, 0x59, 0x80, 0x99, 0xe4, 0x6b, 0xaf, 0xe4, 0x9e, 0x36, 0xd8, + 0xe6, 0x62, 0x2b, 0x16, 0x64, 0x8d, 0x2c, 0x91, 0x43, 0x2f, 0x7a, 0xee, 0xf1, 0xf1, 0xb5, 0xa5, + 0x53, 0x56, 0x9e, 0x1f, 0x63, 0x79, 0xc3, 0x22, 0xd9, 0xf1, 0x07, 0x5d, 0xe3, 0x80, 0x52, 0x79, + 0x8e, 0xcf, 0x9d, 0x7b, 0x1c, 0x18, 0xa9, 0x59, 0xa1, 0xc1, 0x96, 0xed, 0x2d, 0x7b, 0x34, 0x70, + 0xb7, 0x5d, 0xfd, 0x52, 0x3d, 0x5f, 0xb9, 0xdf, 0x90, 0x65, 0xa8, 0xa1, 0xf6, 0x3b, 0x63, 0x50, + 0xe6, 0xf9, 0xaf, 0xae, 0x05, 0x7e, 0x97, 0x3f, 0x89, 0x18, 0x1a, 0x67, 0x26, 0xd9, 0x6d, 0x37, + 0x46, 0x7d, 0x77, 0x34, 0xa6, 0x28, 0xdd, 0xdd, 0x8d, 0x12, 0x4c, 0x70, 0x24, 0x3d, 0x28, 0x6d, + 0xcb, 0x7c, 0xbd, 0xb2, 0xef, 0x46, 0xcc, 0x39, 0xa9, 0xb2, 0xff, 0x8a, 0x26, 0x50, 0xff, 0x50, + 0x73, 0xb1, 0x1d, 0x98, 0x4d, 0x25, 
0x30, 0xc9, 0x3d, 0xcb, 0xef, 0xff, 0x1c, 0x87, 0xb2, 0x8e, + 0xb3, 0x22, 0x9f, 0x48, 0x18, 0xb0, 0xca, 0xf5, 0x0f, 0x1b, 0xef, 0x75, 0xed, 0xf8, 0xad, 0xf7, + 0x0e, 0xaa, 0xb3, 0x1a, 0x39, 0x65, 0x8c, 0x7a, 0x1c, 0x0a, 0xfd, 0xa0, 0x93, 0x3e, 0xa1, 0xde, + 0xc6, 0x35, 0x64, 0xe5, 0x66, 0x6c, 0x58, 0xe1, 0x81, 0xc6, 0x86, 0xb1, 0x5d, 0x72, 0xcb, 0x6f, + 0xed, 0xa7, 0xdf, 0xf7, 0xaa, 0xfb, 0xad, 0x7d, 0xe4, 0x10, 0xf2, 0x0a, 0xcc, 0xc8, 0x80, 0x37, + 0xa5, 0xc4, 0x14, 0xb9, 0x9e, 0xaa, 0x1d, 0x17, 0x36, 0x13, 0x50, 0x4c, 0x61, 0xb3, 0x5d, 0xf6, + 0x6e, 0xe8, 0x7b, 0x3c, 0x77, 0xf3, 0x44, 0xf2, 0x96, 0xf3, 0x46, 0xe3, 0xd6, 0x4d, 0x6e, 0x48, + 0xd3, 0x18, 0x89, 0x98, 0xba, 0xc9, 0x63, 0x63, 0xea, 0x56, 0x04, 0x6d, 0x26, 0x2d, 0xdf, 0x51, + 0xa6, 0xea, 0x4f, 0x2b, 0xba, 0xac, 0xec, 0xbd, 0x83, 0x23, 0x8c, 0xa4, 0xba, 0x66, 0x56, 0xf4, + 0x61, 0xf9, 0xfd, 0x8b, 0x3e, 0xb4, 0x6f, 0xc3, 0x6c, 0xaa, 0xff, 0x94, 0x81, 0xc3, 0xca, 0x36, + 0x70, 0x9c, 0xec, 0x85, 0xb0, 0x7f, 0x66, 0xc1, 0x85, 0x81, 0x15, 0xe9, 0xa4, 0x61, 0xa0, 0xe9, + 0xbd, 0x71, 0xec, 0xec, 0x7b, 0x63, 0xe1, 0x74, 0x7b, 0x63, 0x7d, 0xeb, 0x7b, 0x3f, 0xbc, 0xf2, + 0xa1, 0xef, 0xff, 0xf0, 0xca, 0x87, 0xfe, 0xf0, 0x87, 0x57, 0x3e, 0xf4, 0xce, 0xe1, 0x15, 0xeb, + 0x7b, 0x87, 0x57, 0xac, 0xef, 0x1f, 0x5e, 0xb1, 0xfe, 0xf0, 0xf0, 0x8a, 0xf5, 0x9f, 0x0f, 0xaf, + 0x58, 0xdf, 0xfa, 0x93, 0x2b, 0x1f, 0xfa, 0xf4, 0xa7, 0xe2, 0x9e, 0x5a, 0x52, 0x3d, 0xc5, 0x7f, + 0x7c, 0x54, 0xf5, 0xcb, 0x52, 0x6f, 0xb7, 0xbd, 0xc4, 0x7a, 0x6a, 0x49, 0x97, 0xa8, 0x9e, 0xfa, + 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x7d, 0x66, 0x21, 0xe0, 0xa6, 0x00, 0x00, } func (m *ALBStatus) Marshal() (dAtA []byte, err error) { @@ -4197,6 +4276,18 @@ func (m *AnalysisRunSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TTLStrategy != nil { + { + size, err := m.TTLStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x32 + } if len(m.MeasurementRetention) > 0 { for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { { @@ -4284,6 +4375,18 @@ func (m *AnalysisRunStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.CompletedAt != nil { + { + size, err := m.CompletedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.DryRunSummary != nil { { size, err := m.DryRunSummary.MarshalToSizedBuffer(dAtA[:i]) @@ -4468,6 +4571,42 @@ func (m *AnalysisTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *AnalysisTemplateRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AnalysisTemplateRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AnalysisTemplateRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *AnalysisTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4488,6 +4627,20 @@ func (m *AnalysisTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Templates) > 0 { + for iNdEx := len(m.Templates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Templates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if 
len(m.MeasurementRetention) > 0 { for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { { @@ -4914,6 +5067,49 @@ func (m *ArgumentValueFrom) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Authentication) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Authentication) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Authentication) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.OAuth2.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Sigv4.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *AwsResourceRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5860,11 +6056,45 @@ func (m *DatadogMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Aggregator) + copy(dAtA[i:], m.Aggregator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Aggregator))) + i-- + dAtA[i] = 0x32 i -= len(m.ApiVersion) copy(dAtA[i:], m.ApiVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ApiVersion))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2a + i -= len(m.Formula) + copy(dAtA[i:], m.Formula) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Formula))) + i-- + dAtA[i] = 0x22 + if len(m.Queries) > 0 { + keysForQueries := make([]string, 0, len(m.Queries)) + for k := range m.Queries { + keysForQueries = append(keysForQueries, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForQueries) + 
for iNdEx := len(keysForQueries) - 1; iNdEx >= 0; iNdEx-- { + v := m.Queries[string(keysForQueries[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForQueries[iNdEx]) + copy(dAtA[i:], keysForQueries[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForQueries[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } i -= len(m.Query) copy(dAtA[i:], m.Query) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Query))) @@ -6195,6 +6425,16 @@ func (m *ExperimentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.AnalysisRunMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a if len(m.MeasurementRetention) > 0 { for iNdEx := len(m.MeasurementRetention) - 1; iNdEx >= 0; iNdEx-- { { @@ -7504,6 +7744,53 @@ func (m *NginxTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *OAuth2Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuth2Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuth2Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.ClientSecret) + copy(dAtA[i:], m.ClientSecret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientSecret))) + i-- + dAtA[i] = 
0x1a + i -= len(m.ClientID) + copy(dAtA[i:], m.ClientID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientID))) + i-- + dAtA[i] = 0x12 + i -= len(m.TokenURL) + copy(dAtA[i:], m.TokenURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TokenURL))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ObjectRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7524,6 +7811,11 @@ func (m *ObjectRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.ScaleDown) + copy(dAtA[i:], m.ScaleDown) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScaleDown))) + i-- + dAtA[i] = 0x22 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -7710,39 +8002,6 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) MarshalToSizedBuffer(d return len(dAtA) - i, nil } -func (m *PrometheusAuth) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PrometheusAuth) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PrometheusAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Sigv4.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - return len(dAtA) - i, nil -} - func (m *PrometheusMetric) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8080,42 +8339,6 @@ func (m *RolloutAnalysisRunStatus) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *RolloutAnalysisTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - 
if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RolloutAnalysisTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RolloutAnalysisTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ClusterScope { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.TemplateName) - copy(dAtA[i:], m.TemplateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *RolloutCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8199,6 +8422,30 @@ func (m *RolloutExperimentStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.AnalysisRunMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DryRun[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Analyses) > 0 { for iNdEx := len(m.Analyses) - 1; iNdEx >= 0; iNdEx-- { { @@ -8893,6 +9140,11 @@ func (m *RolloutTrafficRouting) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.MaxTrafficWeight != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxTrafficWeight)) + i-- + dAtA[i] = 0x58 + } if len(m.Plugins) > 0 { keysForPlugins := make([]string, 0, len(m.Plugins)) for k := range m.Plugins { @@ -9597,6 +9849,44 @@ func (m *TLSRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TTLStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TTLStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TTLStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecondsAfterSuccess != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterSuccess)) + i-- + dAtA[i] = 0x18 + } + if m.SecondsAfterFailure != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterFailure)) + i-- + dAtA[i] = 0x10 + } + if m.SecondsAfterCompletion != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SecondsAfterCompletion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *TemplateService) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9967,6 +10257,16 @@ func (m *WebMetric) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.Authentication.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a if m.JSONBody != nil { i -= len(m.JSONBody) copy(dAtA[i:], m.JSONBody) @@ -10265,6 +10565,10 @@ func (m *AnalysisRunSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.TTLStrategy != nil { + l = m.TTLStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10294,6 +10598,10 @@ func (m *AnalysisRunStatus) Size() (n int) { l = m.DryRunSummary.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.CompletedAt != nil { + l = m.CompletedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -10342,6 +10650,18 @@ func (m *AnalysisTemplateList) Size() (n int) { return n } +func (m *AnalysisTemplateRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TemplateName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 
+ return n +} + func (m *AnalysisTemplateSpec) Size() (n int) { if m == nil { return 0 @@ -10372,6 +10692,12 @@ func (m *AnalysisTemplateSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Templates) > 0 { + for _, e := range m.Templates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10520,6 +10846,19 @@ func (m *ArgumentValueFrom) Size() (n int) { return n } +func (m *Authentication) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Sigv4.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OAuth2.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *AwsResourceRef) Size() (n int) { if m == nil { return 0 @@ -10873,8 +11212,20 @@ func (m *DatadogMetric) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Query) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Queries) > 0 { + for k, v := range m.Queries { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Formula) + n += 1 + l + sovGenerated(uint64(l)) l = len(m.ApiVersion) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Aggregator) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11019,6 +11370,8 @@ func (m *ExperimentSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = m.AnalysisRunMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11480,6 +11833,27 @@ func (m *NginxTrafficRouting) Size() (n int) { return n } +func (m *OAuth2Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TokenURL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientSecret) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + 
func (m *ObjectRef) Size() (n int) { if m == nil { return 0 @@ -11492,6 +11866,8 @@ func (m *ObjectRef) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ScaleDown) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -11556,17 +11932,6 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Size() (n int) { return n } -func (m *PrometheusAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Sigv4.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *PrometheusMetric) Size() (n int) { if m == nil { return 0 @@ -11690,18 +12055,6 @@ func (m *RolloutAnalysisRunStatus) Size() (n int) { return n } -func (m *RolloutAnalysisTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TemplateName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - func (m *RolloutCondition) Size() (n int) { if m == nil { return 0 @@ -11743,6 +12096,14 @@ func (m *RolloutExperimentStep) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.DryRun) > 0 { + for _, e := range m.DryRun { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AnalysisRunMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -12017,6 +12378,9 @@ func (m *RolloutTrafficRouting) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.MaxTrafficWeight != nil { + n += 1 + sovGenerated(uint64(*m.MaxTrafficWeight)) + } return n } @@ -12239,6 +12603,24 @@ func (m *TLSRoute) Size() (n int) { return n } +func (m *TTLStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SecondsAfterCompletion != nil { + n += 1 + sovGenerated(uint64(*m.SecondsAfterCompletion)) + } + if m.SecondsAfterFailure != nil { + n += 1 + sovGenerated(uint64(*m.SecondsAfterFailure)) + } + if m.SecondsAfterSuccess != nil { + n += 1 + sovGenerated(uint64(*m.SecondsAfterSuccess)) + } + 
return n +} + func (m *TemplateService) Size() (n int) { if m == nil { return 0 @@ -12394,6 +12776,8 @@ func (m *WebMetric) Size() (n int) { l = len(m.JSONBody) n += 1 + l + sovGenerated(uint64(l)) } + l = m.Authentication.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -12569,6 +12953,7 @@ func (this *AnalysisRunSpec) String() string { `Terminate:` + fmt.Sprintf("%v", this.Terminate) + `,`, `DryRun:` + repeatedStringForDryRun + `,`, `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, + `TTLStrategy:` + strings.Replace(this.TTLStrategy.String(), "TTLStrategy", "TTLStrategy", 1) + `,`, `}`, }, "") return s @@ -12589,6 +12974,7 @@ func (this *AnalysisRunStatus) String() string { `StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1) + `,`, `RunSummary:` + strings.Replace(strings.Replace(this.RunSummary.String(), "RunSummary", "RunSummary", 1), `&`, ``, 1) + `,`, `DryRunSummary:` + strings.Replace(this.DryRunSummary.String(), "RunSummary", "RunSummary", 1) + `,`, + `CompletedAt:` + strings.Replace(fmt.Sprintf("%v", this.CompletedAt), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -12631,6 +13017,17 @@ func (this *AnalysisTemplateList) String() string { }, "") return s } +func (this *AnalysisTemplateRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AnalysisTemplateRef{`, + `TemplateName:` + fmt.Sprintf("%v", this.TemplateName) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `}`, + }, "") + return s +} func (this *AnalysisTemplateSpec) String() string { if this == nil { return "nil" @@ -12655,11 +13052,17 @@ func (this *AnalysisTemplateSpec) String() string { repeatedStringForMeasurementRetention += strings.Replace(strings.Replace(f.String(), "MeasurementRetention", "MeasurementRetention", 1), `&`, ``, 1) + "," } repeatedStringForMeasurementRetention += "}" + repeatedStringForTemplates := "[]AnalysisTemplateRef{" + for _, f := range 
this.Templates { + repeatedStringForTemplates += strings.Replace(strings.Replace(f.String(), "AnalysisTemplateRef", "AnalysisTemplateRef", 1), `&`, ``, 1) + "," + } + repeatedStringForTemplates += "}" s := strings.Join([]string{`&AnalysisTemplateSpec{`, `Metrics:` + repeatedStringForMetrics + `,`, `Args:` + repeatedStringForArgs + `,`, `DryRun:` + repeatedStringForDryRun + `,`, `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, + `Templates:` + repeatedStringForTemplates + `,`, `}`, }, "") return s @@ -12762,6 +13165,17 @@ func (this *ArgumentValueFrom) String() string { }, "") return s } +func (this *Authentication) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Authentication{`, + `Sigv4:` + strings.Replace(strings.Replace(this.Sigv4.String(), "Sigv4Config", "Sigv4Config", 1), `&`, ``, 1) + `,`, + `OAuth2:` + strings.Replace(strings.Replace(this.OAuth2.String(), "OAuth2Config", "OAuth2Config", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *AwsResourceRef) String() string { if this == nil { return "nil" @@ -12974,10 +13388,23 @@ func (this *DatadogMetric) String() string { if this == nil { return "nil" } + keysForQueries := make([]string, 0, len(this.Queries)) + for k := range this.Queries { + keysForQueries = append(keysForQueries, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForQueries) + mapStringForQueries := "map[string]string{" + for _, k := range keysForQueries { + mapStringForQueries += fmt.Sprintf("%v: %v,", k, this.Queries[k]) + } + mapStringForQueries += "}" s := strings.Join([]string{`&DatadogMetric{`, `Interval:` + fmt.Sprintf("%v", this.Interval) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, + `Queries:` + mapStringForQueries + `,`, + `Formula:` + fmt.Sprintf("%v", this.Formula) + `,`, `ApiVersion:` + fmt.Sprintf("%v", this.ApiVersion) + `,`, + `Aggregator:` + fmt.Sprintf("%v", this.Aggregator) + `,`, `}`, }, "") return s @@ -13100,6 +13527,7 @@ func 
(this *ExperimentSpec) String() string { `ScaleDownDelaySeconds:` + valueToStringGenerated(this.ScaleDownDelaySeconds) + `,`, `DryRun:` + repeatedStringForDryRun + `,`, `MeasurementRetention:` + repeatedStringForMeasurementRetention + `,`, + `AnalysisRunMetadata:` + strings.Replace(strings.Replace(this.AnalysisRunMetadata.String(), "AnalysisRunMetadata", "AnalysisRunMetadata", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13452,6 +13880,19 @@ func (this *NginxTrafficRouting) String() string { }, "") return s } +func (this *OAuth2Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuth2Config{`, + `TokenURL:` + fmt.Sprintf("%v", this.TokenURL) + `,`, + `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, + `ClientSecret:` + fmt.Sprintf("%v", this.ClientSecret) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} func (this *ObjectRef) String() string { if this == nil { return "nil" @@ -13460,6 +13901,7 @@ func (this *ObjectRef) String() string { `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ScaleDown:` + fmt.Sprintf("%v", this.ScaleDown) + `,`, `}`, }, "") return s @@ -13527,16 +13969,6 @@ func (this *PreferredDuringSchedulingIgnoredDuringExecution) String() string { }, "") return s } -func (this *PrometheusAuth) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PrometheusAuth{`, - `Sigv4:` + strings.Replace(strings.Replace(this.Sigv4.String(), "Sigv4Config", "Sigv4Config", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *PrometheusMetric) String() string { if this == nil { return "nil" @@ -13549,7 +13981,7 @@ func (this *PrometheusMetric) String() string { s := strings.Join([]string{`&PrometheusMetric{`, `Address:` + fmt.Sprintf("%v", this.Address) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, - `Authentication:` + 
strings.Replace(strings.Replace(this.Authentication.String(), "PrometheusAuth", "PrometheusAuth", 1), `&`, ``, 1) + `,`, + `Authentication:` + strings.Replace(strings.Replace(this.Authentication.String(), "Authentication", "Authentication", 1), `&`, ``, 1) + `,`, `Timeout:` + valueToStringGenerated(this.Timeout) + `,`, `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, `Headers:` + repeatedStringForHeaders + `,`, @@ -13592,9 +14024,9 @@ func (this *RolloutAnalysis) String() string { if this == nil { return "nil" } - repeatedStringForTemplates := "[]RolloutAnalysisTemplate{" + repeatedStringForTemplates := "[]AnalysisTemplateRef{" for _, f := range this.Templates { - repeatedStringForTemplates += strings.Replace(strings.Replace(f.String(), "RolloutAnalysisTemplate", "RolloutAnalysisTemplate", 1), `&`, ``, 1) + "," + repeatedStringForTemplates += strings.Replace(strings.Replace(f.String(), "AnalysisTemplateRef", "AnalysisTemplateRef", 1), `&`, ``, 1) + "," } repeatedStringForTemplates += "}" repeatedStringForArgs := "[]AnalysisRunArgument{" @@ -13645,17 +14077,6 @@ func (this *RolloutAnalysisRunStatus) String() string { }, "") return s } -func (this *RolloutAnalysisTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RolloutAnalysisTemplate{`, - `TemplateName:` + fmt.Sprintf("%v", this.TemplateName) + `,`, - `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, - `}`, - }, "") - return s -} func (this *RolloutCondition) String() string { if this == nil { return "nil" @@ -13685,10 +14106,17 @@ func (this *RolloutExperimentStep) String() string { repeatedStringForAnalyses += strings.Replace(strings.Replace(f.String(), "RolloutExperimentStepAnalysisTemplateRef", "RolloutExperimentStepAnalysisTemplateRef", 1), `&`, ``, 1) + "," } repeatedStringForAnalyses += "}" + repeatedStringForDryRun := "[]DryRun{" + for _, f := range this.DryRun { + repeatedStringForDryRun += strings.Replace(strings.Replace(f.String(), "DryRun", 
"DryRun", 1), `&`, ``, 1) + "," + } + repeatedStringForDryRun += "}" s := strings.Join([]string{`&RolloutExperimentStep{`, `Templates:` + repeatedStringForTemplates + `,`, `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, `Analyses:` + repeatedStringForAnalyses + `,`, + `DryRun:` + repeatedStringForDryRun + `,`, + `AnalysisRunMetadata:` + strings.Replace(strings.Replace(this.AnalysisRunMetadata.String(), "AnalysisRunMetadata", "AnalysisRunMetadata", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13867,6 +14295,7 @@ func (this *RolloutTrafficRouting) String() string { `ManagedRoutes:` + repeatedStringForManagedRoutes + `,`, `Apisix:` + strings.Replace(this.Apisix.String(), "ApisixTrafficRouting", "ApisixTrafficRouting", 1) + `,`, `Plugins:` + mapStringForPlugins + `,`, + `MaxTrafficWeight:` + valueToStringGenerated(this.MaxTrafficWeight) + `,`, `}`, }, "") return s @@ -14056,6 +14485,18 @@ func (this *TLSRoute) String() string { }, "") return s } +func (this *TTLStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TTLStrategy{`, + `SecondsAfterCompletion:` + valueToStringGenerated(this.SecondsAfterCompletion) + `,`, + `SecondsAfterFailure:` + valueToStringGenerated(this.SecondsAfterFailure) + `,`, + `SecondsAfterSuccess:` + valueToStringGenerated(this.SecondsAfterSuccess) + `,`, + `}`, + }, "") + return s +} func (this *TemplateService) String() string { if this == nil { return "nil" @@ -14169,6 +14610,7 @@ func (this *WebMetric) String() string { `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, `JSONBody:` + valueToStringGenerated(this.JSONBody) + `,`, + `Authentication:` + strings.Replace(strings.Replace(this.Authentication.String(), "Authentication", "Authentication", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -15605,6 +16047,42 @@ func (m *AnalysisRunSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field TTLStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TTLStrategy == nil { + m.TTLStrategy = &TTLStrategy{} + } + if err := m.TTLStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15858,6 +16336,42 @@ func (m *AnalysisRunStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = &v1.Time{} + } + if err := m.CompletedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -16202,6 +16716,108 @@ func (m *AnalysisTemplateList) Unmarshal(dAtA []byte) error { } return nil } +func (m *AnalysisTemplateRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AnalysisTemplateRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AnalysisTemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AnalysisTemplateSpec) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -16367,6 +16983,40 @@ func (m *AnalysisTemplateSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Templates = append(m.Templates, AnalysisTemplateRef{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17420,7 +18070,7 @@ func (m *ArgumentValueFrom) Unmarshal(dAtA []byte) error { } return nil } -func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { +func (m *Authentication) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17443,17 +18093,17 @@ func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AwsResourceRef: wiretype end group for non-group") + return fmt.Errorf("proto: Authentication: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AwsResourceRef: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Authentication: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sigv4", wireType) } - var stringLen 
uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17463,29 +18113,30 @@ func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.Sigv4.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ARN", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OAuth2", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17495,55 +18146,24 @@ func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ARN = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FullName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) 
- if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.OAuth2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.FullName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -17566,7 +18186,7 @@ func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { +func (m *AwsResourceRef) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17589,47 +18209,193 @@ func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlueGreenStatus: wiretype end group for non-group") + return fmt.Errorf("proto: AwsResourceRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlueGreenStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AwsResourceRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreviewSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreviewSelector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveSelector", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ARN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ARN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FullName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FullName = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlueGreenStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlueGreenStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlueGreenStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviewSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviewSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveSelector", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20394,7 +21160,166 @@ func 
(m *DatadogMetric) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queries == nil { + m.Queries = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Queries[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Formula", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Formula = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20424,6 +21349,38 @@ func (m *DatadogMetric) Unmarshal(dAtA []byte) error { } m.ApiVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Aggregator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aggregator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21660,6 +22617,39 @@ func (m *ExperimentSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnalysisRunMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AnalysisRunMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -25859,7 +26849,7 @@ func (m *NginxTrafficRouting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ObjectRef) Unmarshal(dAtA []byte) error { +func (m *OAuth2Config) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25882,15 +26872,15 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: ObjectRef: wiretype end group for non-group") + return fmt.Errorf("proto: OAuth2Config: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectRef: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OAuth2Config: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TokenURL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25918,11 +26908,11 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) + m.TokenURL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25950,11 +26940,11 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.ClientID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientSecret", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25982,7 +26972,39 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ClientSecret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -26005,7 +27027,7 @@ func (m *ObjectRef) Unmarshal(dAtA []byte) error { } return nil } -func (m *PauseCondition) Unmarshal(dAtA []byte) error { +func (m *ObjectRef) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26028,15 +27050,15 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PauseCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ObjectRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PauseCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ObjectRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26064,13 +27086,13 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = PauseReason(dAtA[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - 
var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26080,78 +27102,27 @@ func (m *PauseCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PingPongSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PingPongSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PingPongSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PingService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 
for shift := uint(0); ; shift += 7 { @@ -26179,11 +27150,11 @@ func (m *PingPongSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PingService = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PongService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26211,7 +27182,7 @@ func (m *PingPongSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PongService = string(dAtA[iNdEx:postIndex]) + m.ScaleDown = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -26234,7 +27205,7 @@ func (m *PingPongSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { +func (m *PauseCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26257,15 +27228,244 @@ func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodTemplateMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: PauseCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PauseCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = PauseReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingPongSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingPongSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PingPongSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PingService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PongService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PongService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26607,89 +27807,6 @@ func (m *PreferredDuringSchedulingIgnoredDuringExecution) Unmarshal(dAtA []byte) } return nil } -func (m *PrometheusAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrometheusAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrometheusAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sigv4", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Sigv4.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PrometheusMetric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -27237,7 +28354,7 @@ func (m *RolloutAnalysis) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Templates = append(m.Templates, RolloutAnalysisTemplate{}) + m.Templates = append(m.Templates, AnalysisTemplateRef{}) if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -27647,108 +28764,6 @@ func (m *RolloutAnalysisRunStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *RolloutAnalysisTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RolloutAnalysisTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RolloutAnalysisTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TemplateName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClusterScope = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *RolloutCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -28122,6 +29137,73 @@ func (m *RolloutExperimentStep) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, DryRun{}) + if err := m.DryRun[len(m.DryRun)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnalysisRunMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AnalysisRunMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30586,6 +31668,26 @@ func (m *RolloutTrafficRouting) Unmarshal(dAtA []byte) error { } m.Plugins[mapkey] = ((encoding_json.RawMessage)(mapvalue)) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTrafficWeight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxTrafficWeight = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31026,15 +32128,326 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SMITrafficRouting: wiretype end group for non-group") + return fmt.Errorf("proto: SMITrafficRouting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SMITrafficRouting: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootService", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootService = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrafficSplitName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrafficSplitName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeDetail) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeDetail: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeDetail: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Start = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.End = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeyRef) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeyRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SMITrafficRouting: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretKeyRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootService", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31062,11 +32475,11 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RootService = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TrafficSplitName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31094,7 +32507,7 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TrafficSplitName = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -31117,7 +32530,7 @@ func (m *SMITrafficRouting) Unmarshal(dAtA []byte) error { } return nil } -func (m *ScopeDetail) Unmarshal(dAtA []byte) error { +func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31140,17 
+32553,17 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ScopeDetail: wiretype end group for non-group") + return fmt.Errorf("proto: SetCanaryScale: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeDetail: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetCanaryScale: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31160,29 +32573,17 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Weight = &v case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31192,29 +32593,17 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Region = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Replicas = &v case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchTrafficWeight", wireType) } - m.Step = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31224,14 +32613,65 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Step |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: + m.MatchTrafficWeight = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetHeaderRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetHeaderRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var 
stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31259,13 +32699,13 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Start = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31275,23 +32715,25 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.End = string(dAtA[iNdEx:postIndex]) + m.Match = append(m.Match, HeaderRoutingMatch{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -31314,7 +32756,7 @@ func (m *ScopeDetail) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { +func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31337,10 +32779,10 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretKeyRef: wiretype end group for non-group") + return fmt.Errorf("proto: SetMirrorRoute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretKeyRef: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: SetMirrorRoute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -31377,9 +32819,9 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31389,24 +32831,46 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Match = append(m.Match, RouteMatch{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Percentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Percentage = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31428,7 +32892,7 @@ func (m *SecretKeyRef) Unmarshal(dAtA []byte) error { } return nil } -func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { +func (m *Sigv4Config) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31451,17 +32915,17 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { fieldNum := int32(wire 
>> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetCanaryScale: wiretype end group for non-group") + return fmt.Errorf("proto: Sigv4Config: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetCanaryScale: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Sigv4Config: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31471,17 +32935,29 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Weight = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31491,17 +32967,29 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Profile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchTrafficWeight", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleARN", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31511,12 +32999,24 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.MatchTrafficWeight = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoleARN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31538,7 +33038,7 @@ func (m *SetCanaryScale) Unmarshal(dAtA []byte) error { } return nil } -func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { +func (m *SkyWalkingMetric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31561,15 +33061,15 @@ func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetHeaderRoute: wiretype end group for non-group") + return fmt.Errorf("proto: SkyWalkingMetric: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetHeaderRoute: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SkyWalkingMetric: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31597,13 +33097,13 @@ func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31613,25 +33113,55 @@ func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Match = append(m.Match, HeaderRoutingMatch{}) - if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Interval = DurationString(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -31654,7 +33184,7 @@ func (m *SetHeaderRoute) Unmarshal(dAtA []byte) error { } return nil } -func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { +func (m *StickinessConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31677,17 +33207,17 @@ func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetMirrorRoute: wiretype end group for non-group") + return fmt.Errorf("proto: StickinessConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetMirrorRoute: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StickinessConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31697,63 +33227,17 @@ func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Enabled = bool(v != 0) case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Match = append(m.Match, RouteMatch{}) - if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Percentage", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DurationSeconds", wireType) } - var v int32 + m.DurationSeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31763,12 +33247,11 @@ func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.DurationSeconds |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.Percentage = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31790,7 +33273,7 @@ func (m *SetMirrorRoute) Unmarshal(dAtA []byte) error { } return nil } -func (m *Sigv4Config) Unmarshal(dAtA []byte) error { +func (m *StringMatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31813,15 +33296,15 @@ func (m *Sigv4Config) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Sigv4Config: wiretype end group for non-group") + return fmt.Errorf("proto: StringMatch: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Sigv4Config: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StringMatch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31849,11 +33332,11 @@ func (m *Sigv4Config) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Region = string(dAtA[iNdEx:postIndex]) + m.Exact = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31881,11 +33364,11 @@ func (m *Sigv4Config) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Profile = string(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleARN", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Regex", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31913,7 +33396,7 @@ func (m *Sigv4Config) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RoleARN = string(dAtA[iNdEx:postIndex]) + m.Regex = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -31936,7 +33419,7 @@ func (m *Sigv4Config) Unmarshal(dAtA []byte) error { } return nil } -func (m *SkyWalkingMetric) Unmarshal(dAtA []byte) error { +func (m *TCPRoute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31959,81 +33442,17 @@ func (m *SkyWalkingMetric) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SkyWalkingMetric: wiretype end group for non-group") + return fmt.Errorf("proto: TCPRoute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SkyWalkingMetric: illegal tag %d (wire type 
%d)", fieldNum, wire) + return fmt.Errorf("proto: TCPRoute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32043,24 +33462,11 @@ func (m *SkyWalkingMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Port |= 
int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Interval = DurationString(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32082,7 +33488,7 @@ func (m *SkyWalkingMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *StickinessConfig) Unmarshal(dAtA []byte) error { +func (m *TLSRoute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32105,17 +33511,17 @@ func (m *StickinessConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StickinessConfig: wiretype end group for non-group") + return fmt.Errorf("proto: TLSRoute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StickinessConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TLSRoute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var v int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32125,17 +33531,16 @@ func (m *StickinessConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Port |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.Enabled = bool(v != 0) case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SNIHosts", wireType) } - m.DurationSeconds = 0 + var stringLen uint64 
for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32145,11 +33550,24 @@ func (m *StickinessConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DurationSeconds |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SNIHosts = append(m.SNIHosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32171,7 +33589,7 @@ func (m *StickinessConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *StringMatch) Unmarshal(dAtA []byte) error { +func (m *TTLStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32194,17 +33612,17 @@ func (m *StringMatch) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StringMatch: wiretype end group for non-group") + return fmt.Errorf("proto: TTLStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StringMatch: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TTLStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsAfterCompletion", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32214,143 +33632,17 @@ func (m *StringMatch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if 
b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exact = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.SecondsAfterCompletion = &v case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Regex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Regex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TCPRoute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TCPRoute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TCPRoute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecondsAfterFailure", wireType) } - m.Port = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32360,85 +33652,17 @@ func (m *TCPRoute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TLSRoute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire 
>> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TLSRoute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TLSRoute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.SecondsAfterFailure = &v + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecondsAfterSuccess", wireType) } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SNIHosts", wireType) - } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32448,24 +33672,12 @@ func (m *TLSRoute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SNIHosts = append(m.SNIHosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.SecondsAfterSuccess = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33890,6 +35102,39 @@ func (m *WebMetric) Unmarshal(dAtA []byte) error { m.JSONBody = []byte{} } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authentication", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Authentication.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/rollouts/v1alpha1/generated.proto b/pkg/apis/rollouts/v1alpha1/generated.proto index 1b3135874c..eebb4453cc 100644 --- a/pkg/apis/rollouts/v1alpha1/generated.proto +++ b/pkg/apis/rollouts/v1alpha1/generated.proto @@ -1,6 +1,4 @@ /* -Copyright 2024 The Kubernetes sample-controller Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -145,6 +143,10 @@ message AnalysisRunSpec { // +patchStrategy=merge // +optional repeated MeasurementRetention measurementRetention = 5; + + // TTLStrategy object contains the strategy for the time to live depending on if the analysis succeeded or failed + // +optional + optional TTLStrategy ttlStrategy = 6; } // AnalysisRunStatus is the status for a AnalysisRun resource @@ -166,6 +168,9 @@ message AnalysisRunStatus { // DryRunSummary contains the final results from the metric executions in the dry-run mode optional RunSummary dryRunSummary = 6; + + // CompletedAt indicates when the analysisRun completed + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completedAt = 7; } // AnalysisRunStrategy configuration for the analysis runs and experiments to retain @@ -197,6 +202,16 @@ message AnalysisTemplateList { repeated AnalysisTemplate items = 2; } +message AnalysisTemplateRef { + // TemplateName name of template to use in AnalysisRun + // +optional + optional string templateName = 1; + + // 
Whether to look for the templateName at cluster scope or namespace scope + // +optional + optional bool clusterScope = 2; +} + // AnalysisTemplateSpec is the specification for a AnalysisTemplate resource message AnalysisTemplateSpec { // Metrics contains the list of metrics to query as part of an analysis run @@ -221,6 +236,11 @@ message AnalysisTemplateSpec { // +patchStrategy=merge // +optional repeated MeasurementRetention measurementRetention = 4; + + // Templates reference to a list of analysis templates to combine with the rest of the metrics for an AnalysisRun + // +patchMergeKey=templateName + // +patchStrategy=merge + repeated AnalysisTemplateRef templates = 5; } // AntiAffinity defines which inter-pod scheduling rule to use for anti-affinity injection @@ -303,6 +323,17 @@ message ArgumentValueFrom { optional FieldRef fieldRef = 2; } +// Authentication method +message Authentication { + // Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus + // +optional + optional Sigv4Config sigv4 = 1; + + // OAuth2 config + // +optional + optional OAuth2Config oauth2 = 2; +} + message AwsResourceRef { optional string name = 1; @@ -606,12 +637,27 @@ message ClusterAnalysisTemplateList { } message DatadogMetric { + // +kubebuilder:default="5m" + // Interval refers to the Interval time window in Datadog (default: 5m). Not to be confused with the polling rate for the metric. optional string interval = 1; optional string query = 2; + // Queries is a map of query_name_as_key: query. You can then use query_name_as_key inside Formula.Used for v2 + // +kubebuilder:validation:Type=object + map queries = 3; + + // Formula refers to the Formula made up of the queries. Only useful with Queries. Used for v2 + optional string formula = 4; + // ApiVersion refers to the Datadog API version being used (default: v1). v1 will eventually be deprecated. 
- optional string apiVersion = 3; + // +kubebuilder:validation:Enum=v1;v2 + // +kubebuilder:default=v1 + optional string apiVersion = 5; + + // +kubebuilder:validation:Enum=avg;min;max;sum;last;percentile;mean;l2norm;area + // Aggregator is a type of aggregator to use for metrics-based queries (default: ""). Used for v2 + optional string aggregator = 6; } // DryRun defines the settings for running the analysis in Dry-Run mode. @@ -745,6 +791,10 @@ message ExperimentSpec { // +patchStrategy=merge // +optional repeated MeasurementRetention measurementRetention = 8; + + // AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns + // +optional + optional AnalysisRunMetadata analysisRunMetadata = 9; } // ExperimentStatus is the status for a Experiment resource @@ -1081,6 +1131,21 @@ message NginxTrafficRouting { repeated string stableIngresses = 4; } +message OAuth2Config { + // OAuth2 provider token URL + optional string tokenUrl = 1; + + // OAuth2 client ID + optional string clientId = 2; + + // OAuth2 client secret + optional string clientSecret = 3; + + // OAuth2 scopes + // +optional + repeated string scopes = 4; +} + // ObjectRef holds a references to the Kubernetes object message ObjectRef { // API Version of the referent @@ -1091,6 +1156,9 @@ message ObjectRef { // Name of the referent optional string name = 3; + + // Automatically scale down deployment + optional string scaleDown = 4; } // PauseCondition the reason for a pause and when it started @@ -1126,12 +1194,6 @@ message PreferredDuringSchedulingIgnoredDuringExecution { optional int32 weight = 1; } -// PrometheusMetric defines the prometheus query to perform canary analysis -message PrometheusAuth { - // +optional - optional Sigv4Config sigv4 = 3; -} - // PrometheusMetric defines the prometheus query to perform canary analysis message PrometheusMetric { // Address is the HTTP address and port of the prometheus server @@ -1140,9 +1202,9 @@ message PrometheusMetric { // Query is a 
raw prometheus query to perform optional string query = 2; - // Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus + // Authentication details // +optional - optional PrometheusAuth authentication = 3; + optional Authentication authentication = 3; // Timeout represents the duration within which a prometheus query should complete. It is expressed in seconds. // +optional @@ -1180,7 +1242,7 @@ message RolloutAnalysis { // Templates reference to a list of analysis templates to combine for an AnalysisRun // +patchMergeKey=templateName // +patchStrategy=merge - repeated RolloutAnalysisTemplate templates = 1; + repeated AnalysisTemplateRef templates = 1; // Args the arguments that will be added to the AnalysisRuns // +patchMergeKey=name @@ -1221,16 +1283,6 @@ message RolloutAnalysisRunStatus { optional string message = 3; } -message RolloutAnalysisTemplate { - // TemplateName name of template to use in AnalysisRun - // +optional - optional string templateName = 1; - - // Whether to look for the templateName at cluster scope or namespace scope - // +optional - optional bool clusterScope = 2; -} - // RolloutCondition describes the state of a rollout at a certain point. message RolloutCondition { // Type of deployment condition. 
@@ -1267,6 +1319,16 @@ message RolloutExperimentStep { // +patchMergeKey=name // +patchStrategy=merge repeated RolloutExperimentStepAnalysisTemplateRef analyses = 3; + + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + repeated DryRun dryRun = 4; + + // AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns + // +optional + optional AnalysisRunMetadata analysisRunMetadata = 5; } message RolloutExperimentStepAnalysisTemplateRef { @@ -1548,6 +1610,9 @@ message RolloutTrafficRouting { // +kubebuilder:validation:Type=object // Plugins holds specific configuration that traffic router plugins can use for routing traffic map plugins = 10; + + // MaxTrafficWeight The total weight of traffic. If unspecified, it defaults to 100 + optional int32 maxTrafficWeight = 11; } message RouteMatch { @@ -1704,6 +1769,18 @@ message TLSRoute { repeated string sniHosts = 2; } +// TTLStrategy defines the strategy for the time to live depending on if the analysis succeeded or failed +message TTLStrategy { + // SecondsAfterCompletion is the number of seconds to live after completion. + optional int32 secondsAfterCompletion = 1; + + // SecondsAfterFailure is the number of seconds to live after failure. + optional int32 secondsAfterFailure = 2; + + // SecondsAfterSuccess is the number of seconds to live after success. 
+ optional int32 secondsAfterSuccess = 3; +} + message TemplateService { // Name of the service generated by the experiment optional string name = 1; @@ -1847,6 +1924,10 @@ message WebMetric { // +kubebuilder:validation:Type=object // JSONBody is the body of the web metric in a json format (method must be POST/PUT) optional bytes jsonBody = 8; + + // Authentication details + // +optional + optional Authentication authentication = 9; } message WebMetricHeader { diff --git a/pkg/apis/rollouts/v1alpha1/openapi_generated.go b/pkg/apis/rollouts/v1alpha1/openapi_generated.go index ebd6ce7786..6c18e64a2b 100644 --- a/pkg/apis/rollouts/v1alpha1/openapi_generated.go +++ b/pkg/apis/rollouts/v1alpha1/openapi_generated.go @@ -2,8 +2,6 @@ // +build !ignore_autogenerated /* -Copyright 2024 The Kubernetes sample-controller Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -42,6 +40,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunStrategy": schema_pkg_apis_rollouts_v1alpha1_AnalysisRunStrategy(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplate": schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplate(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateList": schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateList(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef": schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateRef(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateSpec": schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateSpec(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AntiAffinity": schema_pkg_apis_rollouts_v1alpha1_AntiAffinity(ref), 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ApisixRoute": schema_pkg_apis_rollouts_v1alpha1_ApisixRoute(ref), @@ -52,6 +51,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AppMeshVirtualService": schema_pkg_apis_rollouts_v1alpha1_AppMeshVirtualService(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument": schema_pkg_apis_rollouts_v1alpha1_Argument(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ArgumentValueFrom": schema_pkg_apis_rollouts_v1alpha1_ArgumentValueFrom(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Authentication": schema_pkg_apis_rollouts_v1alpha1_Authentication(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AwsResourceRef": schema_pkg_apis_rollouts_v1alpha1_AwsResourceRef(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.BlueGreenStatus": schema_pkg_apis_rollouts_v1alpha1_BlueGreenStatus(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.BlueGreenStrategy": schema_pkg_apis_rollouts_v1alpha1_BlueGreenStrategy(ref), @@ -93,12 +93,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MetricResult": schema_pkg_apis_rollouts_v1alpha1_MetricResult(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NewRelicMetric": schema_pkg_apis_rollouts_v1alpha1_NewRelicMetric(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.NginxTrafficRouting": schema_pkg_apis_rollouts_v1alpha1_NginxTrafficRouting(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.OAuth2Config": schema_pkg_apis_rollouts_v1alpha1_OAuth2Config(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ObjectRef": schema_pkg_apis_rollouts_v1alpha1_ObjectRef(ref), 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PauseCondition": schema_pkg_apis_rollouts_v1alpha1_PauseCondition(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PingPongSpec": schema_pkg_apis_rollouts_v1alpha1_PingPongSpec(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PodTemplateMetadata": schema_pkg_apis_rollouts_v1alpha1_PodTemplateMetadata(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PreferredDuringSchedulingIgnoredDuringExecution": schema_pkg_apis_rollouts_v1alpha1_PreferredDuringSchedulingIgnoredDuringExecution(ref), - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusAuth": schema_pkg_apis_rollouts_v1alpha1_PrometheusAuth(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusMetric": schema_pkg_apis_rollouts_v1alpha1_PrometheusMetric(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RequiredDuringSchedulingIgnoredDuringExecution": schema_pkg_apis_rollouts_v1alpha1_RequiredDuringSchedulingIgnoredDuringExecution(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RollbackWindowSpec": schema_pkg_apis_rollouts_v1alpha1_RollbackWindowSpec(ref), @@ -106,7 +106,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysis": schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysis(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisBackground": schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisBackground(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisRunStatus": schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisRunStatus(ref), - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate": schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisTemplate(ref), 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutCondition": schema_pkg_apis_rollouts_v1alpha1_RolloutCondition(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentStep": schema_pkg_apis_rollouts_v1alpha1_RolloutExperimentStep(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentStepAnalysisTemplateRef": schema_pkg_apis_rollouts_v1alpha1_RolloutExperimentStepAnalysisTemplateRef(ref), @@ -131,6 +130,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.StringMatch": schema_pkg_apis_rollouts_v1alpha1_StringMatch(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TCPRoute": schema_pkg_apis_rollouts_v1alpha1_TCPRoute(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TLSRoute": schema_pkg_apis_rollouts_v1alpha1_TLSRoute(ref), + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TTLStrategy": schema_pkg_apis_rollouts_v1alpha1_TTLStrategy(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateService": schema_pkg_apis_rollouts_v1alpha1_TemplateService(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateSpec": schema_pkg_apis_rollouts_v1alpha1_TemplateSpec(ref), "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateStatus": schema_pkg_apis_rollouts_v1alpha1_TemplateStatus(ref), @@ -550,12 +550,18 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisRunSpec(ref common.ReferenceCallb }, }, }, + "ttlStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "TTLStrategy object contains the strategy for the time to live depending on if the analysis succeeded or failed", + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TTLStrategy"), + }, + }, }, Required: []string{"metrics"}, }, }, Dependencies: []string{ - 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TTLStrategy"}, } } @@ -614,6 +620,12 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisRunStatus(ref common.ReferenceCal Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RunSummary"), }, }, + "completedAt": { + SchemaProps: spec.SchemaProps{ + Description: "CompletedAt indicates when the analysisRun completed", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, Required: []string{"phase"}, }, @@ -741,6 +753,33 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateList(ref common.Reference } } +func schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "templateName": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateName name of template to use in AnalysisRun", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "clusterScope": { + SchemaProps: spec.SchemaProps{ + Description: "Whether to look for the templateName at cluster scope or namespace scope", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateSpec(ref common.ReferenceCallback) 
common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -828,12 +867,31 @@ func schema_pkg_apis_rollouts_v1alpha1_AnalysisTemplateSpec(ref common.Reference }, }, }, + "templates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "templateName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Templates reference to a list of analysis templates to combine with the rest of the metrics for an AnalysisRun", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef"), + }, + }, + }, + }, + }, }, - Required: []string{"metrics"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Argument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Metric"}, } } @@ -1099,6 +1157,35 @@ func schema_pkg_apis_rollouts_v1alpha1_ArgumentValueFrom(ref common.ReferenceCal } } +func schema_pkg_apis_rollouts_v1alpha1_Authentication(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Authentication method", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + 
"sigv4": { + SchemaProps: spec.SchemaProps{ + Description: "Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Sigv4Config"), + }, + }, + "oauth2": { + SchemaProps: spec.SchemaProps{ + Description: "OAuth2 config", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.OAuth2Config"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.OAuth2Config", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Sigv4Config"}, + } +} + func schema_pkg_apis_rollouts_v1alpha1_AwsResourceRef(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1620,8 +1707,7 @@ func schema_pkg_apis_rollouts_v1alpha1_CloudWatchMetricStat(ref common.Reference }, "period": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, "stat": { @@ -1805,16 +1891,39 @@ func schema_pkg_apis_rollouts_v1alpha1_DatadogMetric(ref common.ReferenceCallbac Type: []string{"object"}, Properties: map[string]spec.Schema{ "interval": { + SchemaProps: spec.SchemaProps{ + Description: "Interval refers to the Interval time window in Datadog (default: 5m). Not to be confused with the polling rate for the metric.", + Type: []string{"string"}, + Format: "", + }, + }, + "query": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, Format: "", }, }, - "query": { + "queries": { SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Description: "Queries is a map of query_name_as_key: query. 
You can then use query_name_as_key inside Formula.Used for v2", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "formula": { + SchemaProps: spec.SchemaProps{ + Description: "Formula refers to the Formula made up of the queries. Only useful with Queries. Used for v2", + Type: []string{"string"}, + Format: "", }, }, "apiVersion": { @@ -1824,8 +1933,14 @@ func schema_pkg_apis_rollouts_v1alpha1_DatadogMetric(ref common.ReferenceCallbac Format: "", }, }, + "aggregator": { + SchemaProps: spec.SchemaProps{ + Description: "Aggregator is a type of aggregator to use for metrics-based queries (default: \"\"). Used for v2", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"query"}, }, }, } @@ -2036,14 +2151,12 @@ func schema_pkg_apis_rollouts_v1alpha1_ExperimentCondition(ref common.ReferenceC "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "The last time this condition was updated.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2236,12 +2349,19 @@ func schema_pkg_apis_rollouts_v1alpha1_ExperimentSpec(ref common.ReferenceCallba }, }, }, + "analysisRunMetadata": { + SchemaProps: spec.SchemaProps{ + Description: "AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata"), + }, + }, }, Required: []string{"templates"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentAnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateSpec"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.ExperimentAnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.TemplateSpec"}, } } @@ -3242,6 +3362,54 @@ func schema_pkg_apis_rollouts_v1alpha1_NginxTrafficRouting(ref common.ReferenceC } } +func schema_pkg_apis_rollouts_v1alpha1_OAuth2Config(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "tokenUrl": { + SchemaProps: spec.SchemaProps{ + Description: "OAuth2 provider token URL", + Type: []string{"string"}, + Format: "", + }, + }, + "clientId": { + SchemaProps: spec.SchemaProps{ + Description: "OAuth2 client ID", + Type: []string{"string"}, + Format: "", + }, + }, + "clientSecret": { + SchemaProps: spec.SchemaProps{ + Description: "OAuth2 client secret", + Type: []string{"string"}, + Format: "", + }, + }, + "scopes": { + SchemaProps: spec.SchemaProps{ + Description: "OAuth2 scopes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_ObjectRef(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3270,6 +3438,13 @@ func 
schema_pkg_apis_rollouts_v1alpha1_ObjectRef(ref common.ReferenceCallback) c Format: "", }, }, + "scaleDown": { + SchemaProps: spec.SchemaProps{ + Description: "Automatically scale down deployment", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -3292,8 +3467,7 @@ func schema_pkg_apis_rollouts_v1alpha1_PauseCondition(ref common.ReferenceCallba }, "startTime": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, }, @@ -3402,27 +3576,6 @@ func schema_pkg_apis_rollouts_v1alpha1_PreferredDuringSchedulingIgnoredDuringExe } } -func schema_pkg_apis_rollouts_v1alpha1_PrometheusAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusMetric defines the prometheus query to perform canary analysis", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "sigv4": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Sigv4Config"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Sigv4Config"}, - } -} - func schema_pkg_apis_rollouts_v1alpha1_PrometheusMetric(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3446,9 +3599,9 @@ func schema_pkg_apis_rollouts_v1alpha1_PrometheusMetric(ref common.ReferenceCall }, "authentication": { SchemaProps: spec.SchemaProps{ - Description: "Sigv4 Config is the aws SigV4 configuration to use for SigV4 signing if using Amazon Managed Prometheus", + Description: "Authentication details", Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusAuth"), + Ref: 
ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Authentication"), }, }, "timeout": { @@ -3489,7 +3642,7 @@ func schema_pkg_apis_rollouts_v1alpha1_PrometheusMetric(ref common.ReferenceCall }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.PrometheusAuth", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WebMetricHeader"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Authentication", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WebMetricHeader"}, } } @@ -3591,7 +3744,7 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysis(ref common.ReferenceCallb Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"), + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef"), }, }, }, @@ -3668,7 +3821,7 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysis(ref common.ReferenceCallb }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"}, } } @@ -3693,7 +3846,7 @@ func 
schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisBackground(ref common.Refe Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"), + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef"), }, }, }, @@ -3777,7 +3930,7 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisBackground(ref common.Refe }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutAnalysisTemplate"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunArgument", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.MeasurementRetention"}, } } @@ -3814,33 +3967,6 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisRunStatus(ref common.Refer } } -func schema_pkg_apis_rollouts_v1alpha1_RolloutAnalysisTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "templateName": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateName name of template to use in AnalysisRun", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "clusterScope": { - SchemaProps: spec.SchemaProps{ - Description: "Whether to look for 
the templateName at cluster scope or namespace scope", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - func schema_pkg_apis_rollouts_v1alpha1_RolloutCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3867,14 +3993,12 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutCondition(ref common.ReferenceCall "lastUpdateTime": { SchemaProps: spec.SchemaProps{ Description: "The last time this condition was updated.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", - Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -3957,12 +4081,39 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutExperimentStep(ref common.Referenc }, }, }, + "dryRun": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "metricName", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "DryRun object contains the settings for running the analysis in Dry-Run mode", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun"), + }, + }, + }, + }, + }, + "analysisRunMetadata": { + SchemaProps: spec.SchemaProps{ + Description: "AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata"), + }, + }, }, Required: []string{"templates"}, }, }, Dependencies: []string{ - 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentStepAnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentTemplate"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.AnalysisRunMetadata", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.DryRun", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentStepAnalysisTemplateRef", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.RolloutExperimentTemplate"}, } } @@ -4591,6 +4742,13 @@ func schema_pkg_apis_rollouts_v1alpha1_RolloutTrafficRouting(ref common.Referenc }, }, }, + "maxTrafficWeight": { + SchemaProps: spec.SchemaProps{ + Description: "MaxTrafficWeight The total weight of traffic. If unspecified, it defaults to 100", + Type: []string{"integer"}, + Format: "int32", + }, + }, }, }, }, @@ -5084,6 +5242,40 @@ func schema_pkg_apis_rollouts_v1alpha1_TLSRoute(ref common.ReferenceCallback) co } } +func schema_pkg_apis_rollouts_v1alpha1_TTLStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TTLStrategy defines the strategy for the time to live depending on if the analysis succeeded or failed", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secondsAfterCompletion": { + SchemaProps: spec.SchemaProps{ + Description: "SecondsAfterCompletion is the number of seconds to live after completion.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "secondsAfterFailure": { + SchemaProps: spec.SchemaProps{ + Description: "SecondsAfterFailure is the number of seconds to live after failure.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "secondsAfterSuccess": { + SchemaProps: spec.SchemaProps{ + Description: "SecondsAfterSuccess is the number of seconds to live after success.", + Type: []string{"integer"}, + Format: "int32", + }, 
+ }, + }, + }, + }, + } +} + func schema_pkg_apis_rollouts_v1alpha1_TemplateService(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -5458,12 +5650,19 @@ func schema_pkg_apis_rollouts_v1alpha1_WebMetric(ref common.ReferenceCallback) c Format: "byte", }, }, + "authentication": { + SchemaProps: spec.SchemaProps{ + Description: "Authentication details", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Authentication"), + }, + }, }, Required: []string{"url"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WebMetricHeader"}, + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.Authentication", "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1.WebMetricHeader"}, } } diff --git a/pkg/apis/rollouts/v1alpha1/types.go b/pkg/apis/rollouts/v1alpha1/types.go index f2c2dbe7fe..2ab4ea1018 100755 --- a/pkg/apis/rollouts/v1alpha1/types.go +++ b/pkg/apis/rollouts/v1alpha1/types.go @@ -137,6 +137,8 @@ type ObjectRef struct { Kind string `json:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` // Name of the referent Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // Automatically scale down deployment + ScaleDown string `json:"scaleDown,omitempty" protobuf:"bytes,4,opt,name=scaleDown"` } const ( @@ -377,11 +379,15 @@ type RolloutTrafficRouting struct { ManagedRoutes []MangedRoutes `json:"managedRoutes,omitempty" protobuf:"bytes,8,rep,name=managedRoutes"` // Apisix holds specific configuration to use Apisix to route traffic Apisix *ApisixTrafficRouting `json:"apisix,omitempty" protobuf:"bytes,9,opt,name=apisix"` + // +kubebuilder:validation:Schemaless // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:Type=object // Plugins holds specific configuration that traffic router plugins can use for routing traffic Plugins map[string]json.RawMessage 
`json:"plugins,omitempty" protobuf:"bytes,10,opt,name=plugins"` + + // MaxTrafficWeight The total weight of traffic. If unspecified, it defaults to 100 + MaxTrafficWeight *int32 `json:"maxTrafficWeight,omitempty" protobuf:"varint,11,opt,name=maxTrafficWeight"` } type MangedRoutes struct { @@ -531,6 +537,14 @@ type RolloutExperimentStep struct { // +patchMergeKey=name // +patchStrategy=merge Analyses []RolloutExperimentStepAnalysisTemplateRef `json:"analyses,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,3,rep,name=analyses"` + // DryRun object contains the settings for running the analysis in Dry-Run mode + // +patchMergeKey=metricName + // +patchStrategy=merge + // +optional + DryRun []DryRun `json:"dryRun,omitempty" patchStrategy:"merge" patchMergeKey:"metricName" protobuf:"bytes,4,rep,name=dryRun"` + // AnalysisRunMetadata labels and annotations that will be added to the AnalysisRuns + // +optional + AnalysisRunMetadata AnalysisRunMetadata `json:"analysisRunMetadata,omitempty" protobuf:"bytes,5,opt,name=analysisRunMetadata"` } type RolloutExperimentStepAnalysisTemplateRef struct { @@ -701,7 +715,7 @@ type RolloutAnalysis struct { // Templates reference to a list of analysis templates to combine for an AnalysisRun // +patchMergeKey=templateName // +patchStrategy=merge - Templates []RolloutAnalysisTemplate `json:"templates,omitempty" patchStrategy:"merge" patchMergeKey:"templateName" protobuf:"bytes,1,rep,name=templates"` + Templates []AnalysisTemplateRef `json:"templates,omitempty" patchStrategy:"merge" patchMergeKey:"templateName" protobuf:"bytes,1,rep,name=templates"` // Args the arguments that will be added to the AnalysisRuns // +patchMergeKey=name // +patchStrategy=merge @@ -721,7 +735,7 @@ type RolloutAnalysis struct { AnalysisRunMetadata AnalysisRunMetadata `json:"analysisRunMetadata,omitempty" protobuf:"bytes,5,opt,name=analysisRunMetadata"` } -type RolloutAnalysisTemplate struct { +type AnalysisTemplateRef struct { //TemplateName 
name of template to use in AnalysisRun // +optional TemplateName string `json:"templateName" protobuf:"bytes,1,opt,name=templateName"` @@ -1081,3 +1095,9 @@ type RolloutList struct { type RollbackWindowSpec struct { Revisions int32 `json:"revisions,omitempty" protobuf:"varint,1,opt,name=revisions"` } + +const ( + ScaleDownNever string = "never" + ScaleDownOnSuccess string = "onsuccess" + ScaleDownProgressively string = "progressively" +) diff --git a/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go index 2ec2d88b97..496cedc2a0 100644 --- a/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go @@ -234,6 +234,11 @@ func (in *AnalysisRunSpec) DeepCopyInto(out *AnalysisRunSpec) { *out = make([]MeasurementRetention, len(*in)) copy(*out, *in) } + if in.TTLStrategy != nil { + in, out := &in.TTLStrategy, &out.TTLStrategy + *out = new(TTLStrategy) + (*in).DeepCopyInto(*out) + } return } @@ -267,6 +272,10 @@ func (in *AnalysisRunStatus) DeepCopyInto(out *AnalysisRunStatus) { *out = new(RunSummary) **out = **in } + if in.CompletedAt != nil { + in, out := &in.CompletedAt, &out.CompletedAt + *out = (*in).DeepCopy() + } return } @@ -366,6 +375,22 @@ func (in *AnalysisTemplateList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisTemplateRef) DeepCopyInto(out *AnalysisTemplateRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisTemplateRef. +func (in *AnalysisTemplateRef) DeepCopy() *AnalysisTemplateRef { + if in == nil { + return nil + } + out := new(AnalysisTemplateRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AnalysisTemplateSpec) DeepCopyInto(out *AnalysisTemplateSpec) { *out = *in @@ -393,6 +418,11 @@ func (in *AnalysisTemplateSpec) DeepCopyInto(out *AnalysisTemplateSpec) { *out = make([]MeasurementRetention, len(*in)) copy(*out, *in) } + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]AnalysisTemplateRef, len(*in)) + copy(*out, *in) + } return } @@ -615,6 +645,24 @@ func (in *ArgumentValueFrom) DeepCopy() *ArgumentValueFrom { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.Sigv4 = in.Sigv4 + in.OAuth2.DeepCopyInto(&out.OAuth2) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AwsResourceRef) DeepCopyInto(out *AwsResourceRef) { *out = *in @@ -1080,6 +1128,13 @@ func (in *ClusterAnalysisTemplateList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DatadogMetric) DeepCopyInto(out *DatadogMetric) { *out = *in + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -1264,6 +1319,7 @@ func (in *ExperimentSpec) DeepCopyInto(out *ExperimentSpec) { *out = make([]MeasurementRetention, len(*in)) copy(*out, *in) } + in.AnalysisRunMetadata.DeepCopyInto(&out.AnalysisRunMetadata) return } @@ -1666,7 +1722,7 @@ func (in *MetricProvider) DeepCopyInto(out *MetricProvider) { if in.Datadog != nil { in, out := &in.Datadog, &out.Datadog *out = new(DatadogMetric) - **out = **in + (*in).DeepCopyInto(*out) } if in.Wavefront != nil { in, out := &in.Wavefront, &out.Wavefront @@ -1805,6 +1861,27 @@ func (in *NginxTrafficRouting) DeepCopy() *NginxTrafficRouting { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth2Config) DeepCopyInto(out *OAuth2Config) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth2Config. +func (in *OAuth2Config) DeepCopy() *OAuth2Config { + if in == nil { + return nil + } + out := new(OAuth2Config) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectRef) DeepCopyInto(out *ObjectRef) { *out = *in @@ -1900,27 +1977,10 @@ func (in *PreferredDuringSchedulingIgnoredDuringExecution) DeepCopy() *Preferred return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PrometheusAuth) DeepCopyInto(out *PrometheusAuth) { - *out = *in - out.Sigv4 = in.Sigv4 - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusAuth. -func (in *PrometheusAuth) DeepCopy() *PrometheusAuth { - if in == nil { - return nil - } - out := new(PrometheusAuth) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrometheusMetric) DeepCopyInto(out *PrometheusMetric) { *out = *in - out.Authentication = in.Authentication + in.Authentication.DeepCopyInto(&out.Authentication) if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout *out = new(int64) @@ -2009,7 +2069,7 @@ func (in *RolloutAnalysis) DeepCopyInto(out *RolloutAnalysis) { *out = *in if in.Templates != nil { in, out := &in.Templates, &out.Templates - *out = make([]RolloutAnalysisTemplate, len(*in)) + *out = make([]AnalysisTemplateRef, len(*in)) copy(*out, *in) } if in.Args != nil { @@ -2081,22 +2141,6 @@ func (in *RolloutAnalysisRunStatus) DeepCopy() *RolloutAnalysisRunStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolloutAnalysisTemplate) DeepCopyInto(out *RolloutAnalysisTemplate) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAnalysisTemplate. -func (in *RolloutAnalysisTemplate) DeepCopy() *RolloutAnalysisTemplate { - if in == nil { - return nil - } - out := new(RolloutAnalysisTemplate) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RolloutCondition) DeepCopyInto(out *RolloutCondition) { *out = *in @@ -2132,6 +2176,12 @@ func (in *RolloutExperimentStep) DeepCopyInto(out *RolloutExperimentStep) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]DryRun, len(*in)) + copy(*out, *in) + } + in.AnalysisRunMetadata.DeepCopyInto(&out.AnalysisRunMetadata) return } @@ -2465,6 +2515,11 @@ func (in *RolloutTrafficRouting) DeepCopyInto(out *RolloutTrafficRouting) { (*out)[key] = outVal } } + if in.MaxTrafficWeight != nil { + in, out := &in.MaxTrafficWeight, &out.MaxTrafficWeight + *out = new(int32) + **out = **in + } return } @@ -2753,6 +2808,37 @@ func (in *TLSRoute) DeepCopy() *TLSRoute { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TTLStrategy) DeepCopyInto(out *TTLStrategy) { + *out = *in + if in.SecondsAfterCompletion != nil { + in, out := &in.SecondsAfterCompletion, &out.SecondsAfterCompletion + *out = new(int32) + **out = **in + } + if in.SecondsAfterFailure != nil { + in, out := &in.SecondsAfterFailure, &out.SecondsAfterFailure + *out = new(int32) + **out = **in + } + if in.SecondsAfterSuccess != nil { + in, out := &in.SecondsAfterSuccess, &out.SecondsAfterSuccess + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TTLStrategy. +func (in *TTLStrategy) DeepCopy() *TTLStrategy { + if in == nil { + return nil + } + out := new(TTLStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TemplateService) DeepCopyInto(out *TemplateService) { *out = *in @@ -2925,6 +3011,7 @@ func (in *WebMetric) DeepCopyInto(out *WebMetric) { *out = make(json.RawMessage, len(*in)) copy(*out, *in) } + in.Authentication.DeepCopyInto(&out.Authentication) return } diff --git a/pkg/apis/rollouts/validation/validation.go b/pkg/apis/rollouts/validation/validation.go index 94ba58f59b..8eb7cdac19 100644 --- a/pkg/apis/rollouts/validation/validation.go +++ b/pkg/apis/rollouts/validation/validation.go @@ -20,6 +20,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/weightutil" ) const ( @@ -27,8 +28,8 @@ const ( // MissingFieldMessage the message to indicate rollout is missing a field MissingFieldMessage = "Rollout has missing field '%s'" - // InvalidSetWeightMessage indicates the setweight value needs to be between 0 and 100 - InvalidSetWeightMessage = "SetWeight needs to be between 0 and 100" + // InvalidSetWeightMessage indicates the setweight value needs to be between 0 and max weight + InvalidSetWeightMessage = "SetWeight needs to be between 0 and %d" // InvalidCanaryExperimentTemplateWeightWithoutTrafficRouting indicates experiment weight cannot be set without trafficRouting InvalidCanaryExperimentTemplateWeightWithoutTrafficRouting = "Experiment template weight cannot be set unless TrafficRouting is enabled" // InvalidSetCanaryScaleTrafficPolicy indicates that TrafficRouting, required for SetCanaryScale, is missing @@ -72,14 +73,16 @@ const ( InvalidCanaryDynamicStableScale = "Canary dynamicStableScale can only be used with traffic routing" // InvalidCanaryDynamicStableScaleWithScaleDownDelay indicates that canary.dynamicStableScale cannot be used with scaleDownDelaySeconds InvalidCanaryDynamicStableScaleWithScaleDownDelay = "Canary dynamicStableScale cannot be used with scaleDownDelaySeconds" + // 
InvalidCanaryMaxWeightOnlySupportInNginxAndPlugins indicates that canary.maxTrafficWeight cannot be used + InvalidCanaryMaxWeightOnlySupportInNginxAndPlugins = "Canary maxTrafficWeight in traffic routing only supported in Nginx and Plugins" // InvalidPingPongProvidedMessage indicates that both ping and pong service must be set to use Ping-Pong feature InvalidPingPongProvidedMessage = "Ping service and Pong service must to be set to use Ping-Pong feature" // DuplicatedPingPongServicesMessage indicates that the rollout uses the same service for the ping and pong services DuplicatedPingPongServicesMessage = "This rollout uses the same service for the ping and pong services, but two different services are required." // MissedAlbRootServiceMessage indicates that the rollout with ALB TrafficRouting and ping pong feature enabled must have root service provided MissedAlbRootServiceMessage = "Root service field is required for the configuration with ALB and ping-pong feature enabled" - // PingPongWithAlbOnlyMessage At this moment ping-pong feature works with the ALB traffic routing only - PingPongWithAlbOnlyMessage = "Ping-pong feature works with the ALB traffic routing only" + // PingPongWithRouterOnlyMessage At this moment ping-pong feature works with the ALB and Istio traffic routers only + PingPongWithRouterOnlyMessage = "Ping-pong feature works with the ALB and Istio traffic routers only" // InvalideStepRouteNameNotFoundInManagedRoutes A step has been configured that requires managedRoutes and the route name // is missing from managedRoutes InvalideStepRouteNameNotFoundInManagedRoutes = "Steps define a route that does not exist in spec.strategy.canary.trafficRouting.managedRoutes" @@ -91,11 +94,8 @@ const ( // NOTE: this variable may need to be updated whenever we update our k8s libraries as new options // are introduced or removed. 
var allowAllPodValidationOptions = apivalidation.PodValidationOptions{ - AllowDownwardAPIHugePages: true, AllowInvalidPodDeletionCost: true, AllowIndivisibleHugePagesValues: true, - AllowWindowsHostProcessField: true, - AllowExpandedDNSConfig: true, } func ValidateRollout(rollout *v1alpha1.Rollout) field.ErrorList { @@ -117,7 +117,7 @@ func ValidateRolloutSpec(rollout *v1alpha1.Rollout, fldPath *field.Path) field.E message := fmt.Sprintf(MissingFieldMessage, ".spec.selector") allErrs = append(allErrs, field.Required(fldPath.Child("selector"), message)) } else { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, unversionedvalidation.LabelSelectorValidationOptions{}, fldPath.Child("selector"))...) if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is invalid for deployment")) } @@ -161,7 +161,7 @@ func ValidateRolloutSpec(rollout *v1alpha1.Rollout, fldPath *field.Path) field.E // Skip validating empty template for rollout resolved from ref if rollout.Spec.TemplateResolvedFromRef || spec.WorkloadRef == nil { - allErrs = append(allErrs, validation.ValidatePodTemplateSpecForReplicaSet(&template, selector, replicas, fldPath.Child("template"), allowAllPodValidationOptions)...) + allErrs = append(allErrs, validation.ValidatePodTemplateSpecForReplicaSet(&template, nil, selector, replicas, fldPath.Child("template"), allowAllPodValidationOptions)...) } } allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) @@ -232,10 +232,24 @@ func ValidateRolloutStrategyBlueGreen(rollout *v1alpha1.Rollout, fldPath *field. 
// canary.canaryService to be defined func requireCanaryStableServices(rollout *v1alpha1.Rollout) bool { canary := rollout.Spec.Strategy.Canary - if canary.TrafficRouting == nil || (canary.TrafficRouting.Istio != nil && canary.TrafficRouting.Istio.DestinationRule != nil) || (canary.PingPong != nil) { + + if canary.TrafficRouting == nil { + return false + } + + switch { + case canary.TrafficRouting.ALB != nil && canary.PingPong == nil, + canary.TrafficRouting.Istio != nil && canary.TrafficRouting.Istio.DestinationRule == nil && canary.PingPong == nil, + canary.TrafficRouting.SMI != nil, + canary.TrafficRouting.Apisix != nil, + canary.TrafficRouting.Ambassador != nil, + canary.TrafficRouting.Nginx != nil, + canary.TrafficRouting.AppMesh != nil, + canary.TrafficRouting.Traefik != nil: + return true + default: return false } - return true } func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Path) field.ErrorList { @@ -246,8 +260,8 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat allErrs = append(allErrs, field.Invalid(fldPath.Child("stableService"), canary.StableService, DuplicatedServicesCanaryMessage)) } if canary.PingPong != nil { - if canary.TrafficRouting != nil && canary.TrafficRouting.ALB == nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("trafficRouting").Child("alb"), canary.TrafficRouting.ALB, PingPongWithAlbOnlyMessage)) + if canary.TrafficRouting != nil && canary.TrafficRouting.ALB == nil && canary.TrafficRouting.Istio == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("trafficRouting").Child("alb"), canary.TrafficRouting.ALB, PingPongWithRouterOnlyMessage)) } if canary.PingPong.PingService == "" { allErrs = append(allErrs, field.Invalid(fldPath.Child("pingPong").Child("pingService"), canary.PingPong.PingService, InvalidPingPongProvidedMessage)) @@ -282,6 +296,12 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat if 
canary.ScaleDownDelaySeconds != nil && canary.DynamicStableScale { allErrs = append(allErrs, field.Invalid(fldPath.Child("dynamicStableScale"), canary.DynamicStableScale, InvalidCanaryDynamicStableScaleWithScaleDownDelay)) } + // only the nginx and plugin have this support for now + if canary.TrafficRouting.MaxTrafficWeight != nil { + if canary.TrafficRouting.Nginx == nil && len(canary.TrafficRouting.Plugins) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("trafficRouting").Child("maxTrafficWeight"), canary.TrafficRouting.MaxTrafficWeight, InvalidCanaryMaxWeightOnlySupportInNginxAndPlugins)) + } + } } for i, step := range canary.Steps { @@ -293,8 +313,11 @@ func ValidateRolloutStrategyCanary(rollout *v1alpha1.Rollout, fldPath *field.Pat step.Experiment == nil, step.Pause == nil, step.SetWeight == nil, step.Analysis == nil, step.SetCanaryScale == nil, step.SetHeaderRoute == nil, step.SetMirrorRoute == nil) allErrs = append(allErrs, field.Invalid(stepFldPath, errVal, InvalidStepMessage)) } - if step.SetWeight != nil && (*step.SetWeight < 0 || *step.SetWeight > 100) { - allErrs = append(allErrs, field.Invalid(stepFldPath.Child("setWeight"), *canary.Steps[i].SetWeight, InvalidSetWeightMessage)) + + maxTrafficWeight := weightutil.MaxTrafficWeight(rollout) + + if step.SetWeight != nil && (*step.SetWeight < 0 || *step.SetWeight > maxTrafficWeight) { + allErrs = append(allErrs, field.Invalid(stepFldPath.Child("setWeight"), *canary.Steps[i].SetWeight, fmt.Sprintf(InvalidSetWeightMessage, maxTrafficWeight))) } if step.Pause != nil && step.Pause.DurationSeconds() < 0 { allErrs = append(allErrs, field.Invalid(stepFldPath.Child("pause").Child("duration"), step.Pause.DurationSeconds(), InvalidDurationMessage)) diff --git a/pkg/apis/rollouts/validation/validation_references.go b/pkg/apis/rollouts/validation/validation_references.go index 85040c71c5..9079536c20 100644 --- a/pkg/apis/rollouts/validation/validation_references.go +++ 
b/pkg/apis/rollouts/validation/validation_references.go @@ -127,7 +127,7 @@ func ValidateAnalysisTemplatesWithType(rollout *v1alpha1.Rollout, templates Anal templateNames := GetAnalysisTemplateNames(templates) value := fmt.Sprintf("templateNames: %s", templateNames) - _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "", "", "") + _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, make(map[string]string), make(map[string]string), "", "", "") if err != nil { allErrs = append(allErrs, field.Invalid(fldPath, value, err.Error())) return allErrs @@ -136,7 +136,7 @@ func ValidateAnalysisTemplatesWithType(rollout *v1alpha1.Rollout, templates Anal if rollout.Spec.Strategy.Canary != nil { for _, step := range rollout.Spec.Strategy.Canary.Steps { if step.Analysis != nil { - _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), step.Analysis.DryRun, step.Analysis.MeasurementRetention, "", "", "") + _, err := analysisutil.NewAnalysisRunFromTemplates(templates.AnalysisTemplates, templates.ClusterAnalysisTemplates, buildAnalysisArgs(templates.Args, rollout), step.Analysis.DryRun, step.Analysis.MeasurementRetention, make(map[string]string), make(map[string]string), "", "", "") if err != nil { allErrs = append(allErrs, field.Invalid(fldPath, value, err.Error())) return allErrs @@ -474,7 +474,7 @@ func ValidateAppMeshVirtualRouter(vrouter *unstructured.Unstructured) *field.Err } for idx, routeI := range allRoutesI { routeFldPath := routesFldPath.Index(idx) - route, ok := routeI.(map[string]interface{}) + route, ok := routeI.(map[string]any) if !ok { msg 
:= fmt.Sprintf("Invalid route was found for AppMesh virtual-router %s at index %d", vrouter.GetName(), idx) return field.Invalid(routeFldPath, vrouter.GetName(), msg) diff --git a/pkg/apis/rollouts/validation/validation_references_test.go b/pkg/apis/rollouts/validation/validation_references_test.go index 4fc8349fdf..a8e12f0055 100644 --- a/pkg/apis/rollouts/validation/validation_references_test.go +++ b/pkg/apis/rollouts/validation/validation_references_test.go @@ -684,7 +684,7 @@ func TestValidateAnalysisTemplatesWithType(t *testing.T) { rollout := getAlbRollout("alb-ingress") rollout.Spec.Strategy.Canary.Steps = append(rollout.Spec.Strategy.Canary.Steps, v1alpha1.CanaryStep{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: "analysis-template-name", }, diff --git a/pkg/apis/rollouts/validation/validation_test.go b/pkg/apis/rollouts/validation/validation_test.go index 58a4571ae7..c9fc9ad923 100644 --- a/pkg/apis/rollouts/validation/validation_test.go +++ b/pkg/apis/rollouts/validation/validation_test.go @@ -129,6 +129,102 @@ func TestValidateRolloutStrategyBlueGreen(t *testing.T) { assert.Equal(t, ScaleDownLimitLargerThanRevisionLimit, allErrs[1].Detail) } +func TestValidateRolloutStrategyCanaryMissingServiceNames(t *testing.T) { + tests := []struct { + name string + trafficRouting *v1alpha1.RolloutTrafficRouting + }{ + { + name: "ALB", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{RootService: "root-service"}, + }, + }, + { + name: "Istio", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Istio: &v1alpha1.IstioTrafficRouting{}, + }, + }, + { + name: "SMI", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + SMI: &v1alpha1.SMITrafficRouting{}, + }, + }, + { + name: "Apisix", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Apisix: &v1alpha1.ApisixTrafficRouting{}, + }, + }, + { + name: "Ambassador", + trafficRouting: 
&v1alpha1.RolloutTrafficRouting{ + Ambassador: &v1alpha1.AmbassadorTrafficRouting{}, + }, + }, + { + name: "Nginx", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{}, + }, + }, + { + name: "AppMesh", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{}, + }, + }, + { + name: "Traefik", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Traefik: &v1alpha1.TraefikTrafficRouting{}, + }, + }, + { + name: "Traefik and Istio Subset Routing", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Traefik: &v1alpha1.TraefikTrafficRouting{}, + Istio: &v1alpha1.IstioTrafficRouting{DestinationRule: &v1alpha1.IstioDestinationRule{Name: "destination-rule"}}, + }, + }, + { + name: "AppMesh and external plugin(doesnt require service names)", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + AppMesh: &v1alpha1.AppMeshTrafficRouting{}, + Plugins: map[string]json.RawMessage{"some-plugin": []byte(`{"key": "value"}`)}, + }, + }, + { + name: "Apisix, Istio Subset Routing and external plugin(doesnt require service names)", + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Apisix: &v1alpha1.ApisixTrafficRouting{}, + Istio: &v1alpha1.IstioTrafficRouting{DestinationRule: &v1alpha1.IstioDestinationRule{Name: "destination-rule"}}, + Plugins: map[string]json.RawMessage{"some-plugin": []byte(`{"key": "value"}`)}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a table of test cases + canaryStrategy := &v1alpha1.CanaryStrategy{ + Steps: []v1alpha1.CanaryStep{{ + SetWeight: pointer.Int32(10), + }}, + } + ro := &v1alpha1.Rollout{} + ro.Spec.Strategy.Canary = canaryStrategy + + // Set the traffic route we will be testing + ro.Spec.Strategy.Canary.TrafficRouting = tt.trafficRouting + allErrs := ValidateRolloutStrategyCanary(ro, field.NewPath("")) + assert.Equal(t, InvalidTrafficRoutingMessage, allErrs[0].Detail) + }) + } +} + func 
TestValidateRolloutStrategyCanary(t *testing.T) { canaryStrategy := &v1alpha1.CanaryStrategy{ CanaryService: "canary", @@ -165,6 +261,83 @@ func TestValidateRolloutStrategyCanary(t *testing.T) { }, } + t.Run("valid rollout", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Empty(t, allErrs) + }) + + t.Run("valid plugin missing canary and stable service", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + validRo.Spec.Strategy.Canary.CanaryService = "" + validRo.Spec.Strategy.Canary.StableService = "" + validRo.Spec.Strategy.Canary.TrafficRouting.ALB = nil + validRo.Spec.Strategy.Canary.TrafficRouting.Plugins = map[string]json.RawMessage{"some-plugin": []byte(`{"key": "value"}`)} + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Empty(t, allErrs) + }) + + t.Run("valid Istio missing canary and stable service", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + validRo.Spec.Strategy.Canary.CanaryService = "" + validRo.Spec.Strategy.Canary.StableService = "" + validRo.Spec.Strategy.Canary.TrafficRouting.Istio = &v1alpha1.IstioTrafficRouting{DestinationRule: &v1alpha1.IstioDestinationRule{Name: "destination-rule"}} + validRo.Spec.Strategy.Canary.TrafficRouting.ALB = nil + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Empty(t, allErrs) + }) + + t.Run("valid Istio with ping pong", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + validRo.Spec.Strategy.Canary.CanaryService = "" + validRo.Spec.Strategy.Canary.StableService = "" + validRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{ + PingService: "ping", + PongService: "pong", + } + 
validRo.Spec.Strategy.Canary.TrafficRouting.Istio = &v1alpha1.IstioTrafficRouting{DestinationRule: &v1alpha1.IstioDestinationRule{Name: "destination-rule"}} + validRo.Spec.Strategy.Canary.TrafficRouting.ALB = nil + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Empty(t, allErrs) + }) + + t.Run("valid PingPong missing canary and stable service", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + validRo.Spec.Strategy.Canary.CanaryService = "" + validRo.Spec.Strategy.Canary.StableService = "" + validRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping", PongService: "pong"} + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Empty(t, allErrs) + }) + + t.Run("valid two plugins missing canary and stable service", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + validRo.Spec.Strategy.Canary.CanaryService = "" + validRo.Spec.Strategy.Canary.StableService = "" + validRo.Spec.Strategy.Canary.PingPong = &v1alpha1.PingPongSpec{PingService: "ping", PongService: "pong"} + validRo.Spec.Strategy.Canary.TrafficRouting.Istio = &v1alpha1.IstioTrafficRouting{DestinationRule: &v1alpha1.IstioDestinationRule{Name: "destination-rule"}} + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Empty(t, allErrs) + }) + + t.Run("invalid two plugins missing canary and stable service", func(t *testing.T) { + validRo := ro.DeepCopy() + validRo.Spec.Strategy.Canary.Steps[0].SetWeight = pointer.Int32(10) + validRo.Spec.Strategy.Canary.CanaryService = "" + validRo.Spec.Strategy.Canary.StableService = "" + validRo.Spec.Strategy.Canary.TrafficRouting.ALB = nil + validRo.Spec.Strategy.Canary.TrafficRouting.Istio = &v1alpha1.IstioTrafficRouting{} + validRo.Spec.Strategy.Canary.TrafficRouting.Plugins = map[string]json.RawMessage{"some-plugin": []byte(`{"key": 
"value"}`)} + allErrs := ValidateRolloutStrategyCanary(validRo, field.NewPath("")) + assert.Equal(t, InvalidTrafficRoutingMessage, allErrs[0].Detail) + }) + t.Run("duplicate services", func(t *testing.T) { invalidRo := ro.DeepCopy() invalidRo.Spec.Strategy.Canary.CanaryService = "stable" @@ -210,7 +383,7 @@ func TestValidateRolloutStrategyCanary(t *testing.T) { Nginx: &v1alpha1.NginxTrafficRouting{StableIngress: "stable-ingress"}, } allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) - assert.Equal(t, PingPongWithAlbOnlyMessage, allErrs[0].Detail) + assert.Equal(t, PingPongWithRouterOnlyMessage, allErrs[0].Detail) }) t.Run("invalid traffic routing", func(t *testing.T) { @@ -239,7 +412,59 @@ func TestValidateRolloutStrategyCanary(t *testing.T) { invalidRo := ro.DeepCopy() invalidRo.Spec.Strategy.Canary.Steps[0].SetWeight = &setWeight allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) - assert.Equal(t, InvalidSetWeightMessage, allErrs[0].Detail) + assert.Equal(t, fmt.Sprintf(InvalidSetWeightMessage, 100), allErrs[0].Detail) + }) + + t.Run("only nginx/plugins support max weight value", func(t *testing.T) { + anyWeight := int32(1) + + type testCases struct { + trafficRouting *v1alpha1.RolloutTrafficRouting + expectError bool + expectedError string + } + + testCasesList := []testCases{ + { + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{RootService: "root-service"}, + MaxTrafficWeight: &anyWeight, + }, + expectError: true, + expectedError: InvalidCanaryMaxWeightOnlySupportInNginxAndPlugins, + }, + { + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Nginx: &v1alpha1.NginxTrafficRouting{ + StableIngress: "stable-ingress", + }, + MaxTrafficWeight: &anyWeight, + }, + expectError: false, + }, + { + trafficRouting: &v1alpha1.RolloutTrafficRouting{ + Plugins: map[string]json.RawMessage{ + "anyplugin": []byte(`{"key": "value"}`), + }, + MaxTrafficWeight: &anyWeight, + }, + expectError: false, + }, + 
} + + for _, testCase := range testCasesList { + invalidRo := ro.DeepCopy() + invalidRo.Spec.Strategy.Canary.Steps[0].SetWeight = &anyWeight + invalidRo.Spec.Strategy.Canary.TrafficRouting = testCase.trafficRouting + allErrs := ValidateRolloutStrategyCanary(invalidRo, field.NewPath("")) + if !testCase.expectError { + assert.Empty(t, allErrs) + continue + } + + assert.Equal(t, testCase.expectedError, allErrs[0].Detail) + } }) t.Run("invalid duration set in paused step", func(t *testing.T) { @@ -651,7 +876,7 @@ func TestCanaryScaleDownDelaySeconds(t *testing.T) { Canary: &v1alpha1.CanaryStrategy{ StableService: "stable", CanaryService: "canary", - ScaleDownDelaySeconds: pointer.Int32Ptr(60), + ScaleDownDelaySeconds: pointer.Int32(60), }, }, Template: corev1.PodTemplateSpec{ @@ -718,7 +943,7 @@ func TestCanaryDynamicStableScale(t *testing.T) { }) t.Run("dynamicStableScale with scaleDownDelaySeconds", func(t *testing.T) { ro := ro.DeepCopy() - ro.Spec.Strategy.Canary.ScaleDownDelaySeconds = pointer.Int32Ptr(60) + ro.Spec.Strategy.Canary.ScaleDownDelaySeconds = pointer.Int32(60) ro.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ SMI: &v1alpha1.SMITrafficRouting{}, } @@ -782,7 +1007,7 @@ func TestCanaryExperimentStepWithWeight(t *testing.T) { Experiment: &v1alpha1.RolloutExperimentStep{ Templates: []v1alpha1.RolloutExperimentTemplate{{ Name: "template", - Weight: pointer.Int32Ptr(20), + Weight: pointer.Int32(20), }}, }, }}, @@ -869,5 +1094,4 @@ func TestCanaryExperimentStepWithWeight(t *testing.T) { assert.Equal(t, 1, len(allErrs)) assert.Equal(t, errTrafficRoutingWithExperimentSupport, allErrs[0].Detail) }) - } diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index fef1aaafe4..146286fd64 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -33,8 +33,7 @@ type Interface interface { ArgoprojV1alpha1() 
argoprojv1alpha1.ArgoprojV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index 41721ca52d..0000000000 --- a/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. 
-package versioned diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go index 89de9c005c..710496ab7c 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysisrun.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeAnalysisRuns struct { ns string } -var analysisrunsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "analysisruns"} +var analysisrunsResource = v1alpha1.SchemeGroupVersion.WithResource("analysisruns") -var analysisrunsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "AnalysisRun"} +var analysisrunsKind = v1alpha1.SchemeGroupVersion.WithKind("AnalysisRun") // Get takes name of the analysisRun, and returns the corresponding analysisRun object, and an error if there is any. 
func (c *FakeAnalysisRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AnalysisRun, err error) { diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go index 2cbb02d515..05d0d6c83a 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_analysistemplate.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeAnalysisTemplates struct { ns string } -var analysistemplatesResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "analysistemplates"} +var analysistemplatesResource = v1alpha1.SchemeGroupVersion.WithResource("analysistemplates") -var analysistemplatesKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "AnalysisTemplate"} +var analysistemplatesKind = v1alpha1.SchemeGroupVersion.WithKind("AnalysisTemplate") // Get takes name of the analysisTemplate, and returns the corresponding analysisTemplate object, and an error if there is any. 
func (c *FakeAnalysisTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AnalysisTemplate, err error) { diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go index 36389661de..cfa35d16c3 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_clusteranalysistemplate.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeClusterAnalysisTemplates struct { Fake *FakeArgoprojV1alpha1 } -var clusteranalysistemplatesResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "clusteranalysistemplates"} +var clusteranalysistemplatesResource = v1alpha1.SchemeGroupVersion.WithResource("clusteranalysistemplates") -var clusteranalysistemplatesKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "ClusterAnalysisTemplate"} +var clusteranalysistemplatesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterAnalysisTemplate") // Get takes name of the clusterAnalysisTemplate, and returns the corresponding clusterAnalysisTemplate object, and an error if there is any. 
func (c *FakeClusterAnalysisTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterAnalysisTemplate, err error) { diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go index f237ce32d1..d2c72a9480 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_experiment.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeExperiments struct { ns string } -var experimentsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "experiments"} +var experimentsResource = v1alpha1.SchemeGroupVersion.WithResource("experiments") -var experimentsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Experiment"} +var experimentsKind = v1alpha1.SchemeGroupVersion.WithKind("Experiment") // Get takes name of the experiment, and returns the corresponding experiment object, and an error if there is any. 
func (c *FakeExperiments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Experiment, err error) { diff --git a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go index fce5e8d66e..f2eb37d613 100644 --- a/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go +++ b/pkg/client/clientset/versioned/typed/rollouts/v1alpha1/fake/fake_rollout.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeRollouts struct { ns string } -var rolloutsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "rollouts"} +var rolloutsResource = v1alpha1.SchemeGroupVersion.WithResource("rollouts") -var rolloutsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Rollout"} +var rolloutsKind = v1alpha1.SchemeGroupVersion.WithKind("Rollout") // Get takes name of the rollout, and returns the corresponding rollout object, and an error if there is any. 
func (c *FakeRollouts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Rollout, err error) { diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index ad38e29a88..3a643461c3 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -42,11 +42,17 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -75,6 +81,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -107,20 +121,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. 
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -142,7 +175,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -160,6 +193,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -167,11 +201,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. 
+// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. 
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Argoproj() rollouts.Interface } diff --git a/pkg/kubectl-argo-rollouts/cmd/cmd.go b/pkg/kubectl-argo-rollouts/cmd/cmd.go index 53adf27b5d..372f8c434d 100644 --- a/pkg/kubectl-argo-rollouts/cmd/cmd.go +++ b/pkg/kubectl-argo-rollouts/cmd/cmd.go @@ -56,6 +56,7 @@ func NewCmdArgoRollouts(o *options.ArgoRolloutsOptions) *cobra.Command { return o.UsageErr(c) }, } + o.AddKubectlFlags(cmd) cmd.AddCommand(create.NewCmdCreate(o)) cmd.AddCommand(get.NewCmdGet(o)) @@ -72,7 +73,7 @@ func NewCmdArgoRollouts(o *options.ArgoRolloutsOptions) *cobra.Command { cmd.AddCommand(undo.NewCmdUndo(o)) cmd.AddCommand(dashboard.NewCmdDashboard(o)) cmd.AddCommand(status.NewCmdStatus(o)) - cmd.AddCommand(notificationcmd.NewToolsCommand("notifications", "kubectl argo rollouts notifications", v1alpha1.RolloutGVR, record.NewAPIFactorySettings())) + cmd.AddCommand(notificationcmd.NewToolsCommand("notifications", "kubectl argo rollouts notifications", v1alpha1.RolloutGVR, record.NewAPIFactorySettings(nil))) cmd.AddCommand(completion.NewCmdCompletion(o)) return cmd diff --git a/pkg/kubectl-argo-rollouts/cmd/create/create.go b/pkg/kubectl-argo-rollouts/cmd/create/create.go index f5dccb4932..a6523d24f7 100644 --- a/pkg/kubectl-argo-rollouts/cmd/create/create.go +++ b/pkg/kubectl-argo-rollouts/cmd/create/create.go @@ -127,7 +127,7 @@ func isJSON(fileBytes []byte) bool { return false } -func unmarshal(fileBytes []byte, obj interface{}) error { +func unmarshal(fileBytes []byte, obj any) error { if isJSON(fileBytes) { decoder := json.NewDecoder(bytes.NewReader(fileBytes)) decoder.DisallowUnknownFields() @@ -143,7 +143,7 @@ func (c *CreateOptions) getNamespace(un unstructured.Unstructured) string { if md == nil { return ns } - metadata := 
md.(map[string]interface{}) + metadata := md.(map[string]any) if internalns, ok := metadata["namespace"]; ok { ns = internalns.(string) } diff --git a/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go b/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go index 48c40bad3b..f5b0719509 100644 --- a/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go +++ b/pkg/kubectl-argo-rollouts/cmd/get/get_rollout.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "strings" + "sync" "time" "github.com/juju/ansiterm" @@ -59,7 +60,11 @@ func NewCmdGetRollout(o *options.ArgoRolloutsOptions) *cobra.Command { getOptions.PrintRollout(ri) } else { rolloutUpdates := make(chan *rollout.RolloutInfo) + var rolloutUpdatesMutex sync.Mutex + controller.RegisterCallback(func(roInfo *rollout.RolloutInfo) { + rolloutUpdatesMutex.Lock() + defer rolloutUpdatesMutex.Unlock() rolloutUpdates <- roInfo }) stopCh := ctx.Done() @@ -72,6 +77,8 @@ func NewCmdGetRollout(o *options.ArgoRolloutsOptions) *cobra.Command { } go getOptions.WatchRollout(stopCh, rolloutUpdates) controller.Run(ctx) + rolloutUpdatesMutex.Lock() + defer rolloutUpdatesMutex.Unlock() close(rolloutUpdates) } return nil diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/lint.go b/pkg/kubectl-argo-rollouts/cmd/lint/lint.go index cf1b3cdb36..810abf2c8e 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/lint.go +++ b/pkg/kubectl-argo-rollouts/cmd/lint/lint.go @@ -65,7 +65,7 @@ func NewCmdLint(o *options.ArgoRolloutsOptions) *cobra.Command { return cmd } -func unmarshal(fileBytes []byte, obj interface{}) error { +func unmarshal(fileBytes []byte, obj any) error { return yaml.UnmarshalStrict(fileBytes, &obj, yaml.DisallowUnknownFields) } @@ -81,7 +81,7 @@ func (l *LintOptions) lintResource(path string) error { decoder := goyaml.NewDecoder(bytes.NewReader(fileBytes)) for { - var value interface{} + var value any if err := decoder.Decode(&value); err != nil { if err != io.EOF { return err diff --git 
a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml index fc04b8deb7..edabcc33f2 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-ingress-smi-multi.yml @@ -49,7 +49,7 @@ spec: - service: rollout-smi-experiment-canary weight: 5 --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollout-smi-experiment-stable @@ -61,16 +61,22 @@ spec: http: paths: - path: / + pathType: Prefix backend: - serviceName: rollout-smi-experiment-stable - servicePort: 80 + service: + name: rollout-smi-experiment-stable + port: + number: 80 - host: rollout-smi-experiment-root.local http: paths: - path: / + pathType: Prefix backend: - serviceName: rollout-smi-experiment-root - servicePort: 80 + service: + name: rollout-smi-experiment-root + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout @@ -241,4 +247,4 @@ spec: resources: requests: memory: 16Mi - cpu: 5m \ No newline at end of file + cpu: 5m diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml index ec90d8ad2d..21d6fd2a6d 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/invalid-nginx-canary.yml @@ -40,7 +40,7 @@ spec: selector: app: nginx-rollout --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: nginx-rollout-ingress @@ -50,10 +50,13 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - serviceName: nginx-rollout-root - servicePort: use-annotation + service: + name: nginx-rollout-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git 
a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml index c0b2131c74..8879442ca4 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-alb-canary.yml @@ -40,7 +40,7 @@ spec: selector: app: alb-rollout --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: alb-rollout-ingress @@ -48,10 +48,13 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - serviceName: alb-rollout-root - servicePort: use-annotation + service: + name: alb-rollout-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml index 884eebf406..b28afb5579 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi-multi.yml @@ -49,7 +49,7 @@ spec: - service: rollout-smi-experiment-canary weight: 5 --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollout-smi-experiment-stable @@ -61,16 +61,22 @@ spec: http: paths: - path: / + pathType: Prefix backend: - serviceName: rollout-smi-experiment-stable - servicePort: 80 + service: + name: rollout-smi-experiment-stable + port: + number: 80 - host: rollout-smi-experiment-root.local http: paths: - path: / + pathType: Prefix backend: - serviceName: rollout-smi-experiment-root - servicePort: 80 + service: + name: rollout-smi-experiment-root + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout @@ -241,4 +247,4 @@ spec: resources: requests: memory: 16Mi - cpu: 5m \ No newline at end of file + cpu: 5m diff --git 
a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml index 63f9e6cddf..cd398732ea 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-ingress-smi.yml @@ -49,7 +49,7 @@ spec: - service: rollout-smi-experiment-canary weight: 5 --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: rollout-smi-experiment-stable @@ -61,16 +61,22 @@ spec: http: paths: - path: / + pathType: Prefix backend: - serviceName: rollout-smi-experiment-stable - servicePort: 80 + service: + name: rollout-smi-experiment-stable + port: + number: 80 - host: rollout-smi-experiment-root.local http: paths: - - path: / - backend: - serviceName: rollout-smi-experiment-root - servicePort: 80 + - path: / + pathType: Prefix + backend: + service: + name: rollout-smi-experiment-root + port: + number: 80 --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml index 4d295c6c86..f0e08e83e1 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-basic-canary.yml @@ -12,7 +12,7 @@ spec: selector: app: nginx-rollout --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: nginx-rollout-ingress @@ -20,10 +20,13 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - serviceName: nginx-rollout-root - servicePort: use-annotation + service: + name: nginx-rollout-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml 
b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml index 30fe00ca12..f492d74617 100644 --- a/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml +++ b/pkg/kubectl-argo-rollouts/cmd/lint/testdata/valid-nginx-canary.yml @@ -40,7 +40,7 @@ spec: selector: app: nginx-rollout --- -apiVersion: networking.k8s.io/v1beta1 +apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: nginx-rollout-ingress @@ -48,10 +48,13 @@ spec: rules: - http: paths: - - path: /* + - path: / + pathType: Prefix backend: - serviceName: nginx-rollout-root - servicePort: use-annotation + service: + name: nginx-rollout-root + port: + name: use-annotation --- apiVersion: argoproj.io/v1alpha1 kind: Rollout diff --git a/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go b/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go index 493a8aaa18..cb83a157fa 100644 --- a/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go +++ b/pkg/kubectl-argo-rollouts/cmd/list/list_experiments.go @@ -99,7 +99,7 @@ func (o *ListOptions) PrintExperimentTable(expList *v1alpha1.ExperimentList) err } } } - var cols []interface{} + var cols []any if o.allNamespaces { cols = append(cols, exp.Namespace) } diff --git a/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go b/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go index d015939f69..a797e35f1d 100644 --- a/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go +++ b/pkg/kubectl-argo-rollouts/cmd/list/rollloutinfo.go @@ -86,15 +86,15 @@ func (ri *rolloutInfo) key() infoKey { func (ri *rolloutInfo) String(timestamp, namespace bool) string { fmtString := columnFmtString - args := []interface{}{ri.name, ri.strategy, ri.status, ri.step, ri.setWeight, ri.readyCurrent, ri.desired, ri.upToDate, ri.available} + args := []any{ri.name, ri.strategy, ri.status, ri.step, ri.setWeight, ri.readyCurrent, ri.desired, ri.upToDate, ri.available} if namespace { fmtString = "%-9s\t" + fmtString - args = append([]interface{}{ri.namespace}, args...) 
+ args = append([]any{ri.namespace}, args...) } if timestamp { fmtString = "%-20s\t" + fmtString timestampStr := timeutil.Now().UTC().Truncate(time.Second).Format("2006-01-02T15:04:05Z") - args = append([]interface{}{timestampStr}, args...) + args = append([]any{timestampStr}, args...) } return fmt.Sprintf(fmtString, args...) } diff --git a/pkg/kubectl-argo-rollouts/cmd/set/set_image.go b/pkg/kubectl-argo-rollouts/cmd/set/set_image.go index 45ed6faac9..1a5b5dce24 100644 --- a/pkg/kubectl-argo-rollouts/cmd/set/set_image.go +++ b/pkg/kubectl-argo-rollouts/cmd/set/set_image.go @@ -124,9 +124,9 @@ func newRolloutSetImage(orig *unstructured.Unstructured, container string, image if !ok { continue } - ctrList := ctrListIf.([]interface{}) + ctrList := ctrListIf.([]any) for _, ctrIf := range ctrList { - ctr := ctrIf.(map[string]interface{}) + ctr := ctrIf.(map[string]any) if name, _, _ := unstructured.NestedString(ctr, "name"); name == container || container == "*" { ctr["image"] = image containerFound = true diff --git a/pkg/kubectl-argo-rollouts/cmd/undo/undo.go b/pkg/kubectl-argo-rollouts/cmd/undo/undo.go index a282757b16..d3a78d15f2 100644 --- a/pkg/kubectl-argo-rollouts/cmd/undo/undo.go +++ b/pkg/kubectl-argo-rollouts/cmd/undo/undo.go @@ -176,8 +176,8 @@ func rolloutRevision(ro *unstructured.Unstructured, c kubernetes.Interface, toRe } func getRolloutPatch(podTemplate *corev1.PodTemplateSpec, annotations map[string]string) (types.PatchType, []byte, error) { - patch, err := json.Marshal([]interface{}{ - map[string]interface{}{ + patch, err := json.Marshal([]any{ + map[string]any{ "op": "replace", "path": "/spec/template", "value": podTemplate, @@ -235,12 +235,12 @@ func listReplicaSets(ro *unstructured.Unstructured, getRSList rsListFunc) ([]*ap return owned, nil } -func extractLabelSelector(v map[string]interface{}) (*metav1.LabelSelector, error) { +func extractLabelSelector(v map[string]any) (*metav1.LabelSelector, error) { labels, _, _ := 
unstructured.NestedStringMap(v, "spec", "selector", "matchLabels") items, _, _ := unstructured.NestedSlice(v, "spec", "selector", "matchExpressions") matchExpressions := []metav1.LabelSelectorRequirement{} for _, item := range items { - m, ok := item.(map[string]interface{}) + m, ok := item.(map[string]any) if !ok { return nil, fmt.Errorf("unable to retrieve matchExpressions for object, item %v is not a map", item) } diff --git a/pkg/kubectl-argo-rollouts/info/analysisrun_info.go b/pkg/kubectl-argo-rollouts/info/analysisrun_info.go index abf19b1273..8f7049a3e5 100644 --- a/pkg/kubectl-argo-rollouts/info/analysisrun_info.go +++ b/pkg/kubectl-argo-rollouts/info/analysisrun_info.go @@ -70,9 +70,14 @@ func getAnalysisRunInfo(ownerUID types.UID, allAnalysisRuns []*v1alpha1.Analysis for _, measurement := range analysisutil.ArrayMeasurement(run, mr.Name) { if measurement.Metadata != nil { if jobName, ok := measurement.Metadata[job.JobNameKey]; ok { + ns := run.Namespace + if jobNamespace, ok := measurement.Metadata[job.JobNamespaceKey]; ok { + ns = jobNamespace + } jobInfo := rollout.JobInfo{ ObjectMeta: &v1.ObjectMeta{ - Name: jobName, + Name: jobName, + Namespace: ns, }, Icon: analysisIcon(measurement.Phase), Status: string(measurement.Phase), diff --git a/pkg/kubectl-argo-rollouts/info/info_test.go b/pkg/kubectl-argo-rollouts/info/info_test.go index d7bbe161fd..ab28b48637 100644 --- a/pkg/kubectl-argo-rollouts/info/info_test.go +++ b/pkg/kubectl-argo-rollouts/info/info_test.go @@ -169,3 +169,11 @@ func TestRolloutAborted(t *testing.T) { assert.Equal(t, "Degraded", roInfo.Status) assert.Equal(t, `RolloutAborted: metric "web" assessed Failed due to failed (1) > failureLimit (0)`, roInfo.Message) } + +func TestRolloutInfoMetadata(t *testing.T) { + rolloutObjs := testdata.NewCanaryRollout() + roInfo := NewRolloutInfo(rolloutObjs.Rollouts[0], rolloutObjs.ReplicaSets, rolloutObjs.Pods, rolloutObjs.Experiments, rolloutObjs.AnalysisRuns, nil) + assert.Equal(t, 
roInfo.ObjectMeta.Name, rolloutObjs.Rollouts[0].Name) + assert.Equal(t, roInfo.ObjectMeta.Annotations, rolloutObjs.Rollouts[0].Annotations) + assert.Equal(t, roInfo.ObjectMeta.Labels, rolloutObjs.Rollouts[0].Labels) +} diff --git a/pkg/kubectl-argo-rollouts/info/replicaset_info.go b/pkg/kubectl-argo-rollouts/info/replicaset_info.go index 362261daef..4c1a3a1866 100644 --- a/pkg/kubectl-argo-rollouts/info/replicaset_info.go +++ b/pkg/kubectl-argo-rollouts/info/replicaset_info.go @@ -73,6 +73,11 @@ func GetReplicaSetInfo(ownerUID types.UID, ro *v1alpha1.Rollout, allReplicaSets for _, ctr := range rs.Spec.Template.Spec.Containers { rsInfo.Images = append(rsInfo.Images, ctr.Image) } + + for _, ctr := range rs.Spec.Template.Spec.InitContainers { + rsInfo.InitContainerImages = append(rsInfo.InitContainerImages, ctr.Image) + } + rsInfos = append(rsInfos, rsInfo) } sort.Slice(rsInfos[:], func(i, j int) bool { diff --git a/pkg/kubectl-argo-rollouts/info/rollout_info.go b/pkg/kubectl-argo-rollouts/info/rollout_info.go index 59ee3f076a..37604aa03b 100644 --- a/pkg/kubectl-argo-rollouts/info/rollout_info.go +++ b/pkg/kubectl-argo-rollouts/info/rollout_info.go @@ -14,6 +14,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/defaults" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + "github.com/argoproj/argo-rollouts/utils/weightutil" ) func NewRolloutInfo( @@ -29,6 +30,8 @@ func NewRolloutInfo( ObjectMeta: &v1.ObjectMeta{ Name: ro.Name, Namespace: ro.Namespace, + Labels: ro.Labels, + Annotations: ro.Annotations, UID: ro.UID, CreationTimestamp: ro.CreationTimestamp, ResourceVersion: ro.ObjectMeta.ResourceVersion, @@ -56,12 +59,12 @@ func NewRolloutInfo( currentStep, _ := replicasetutil.GetCurrentCanaryStep(ro) if currentStep == nil { - roInfo.ActualWeight = "100" + roInfo.ActualWeight = fmt.Sprintf("%d", weightutil.MaxTrafficWeight(ro)) } else if ro.Status.AvailableReplicas > 0 { if 
ro.Spec.Strategy.Canary.TrafficRouting == nil { for _, rs := range roInfo.ReplicaSets { if rs.Canary { - roInfo.ActualWeight = fmt.Sprintf("%d", (rs.Available*100)/ro.Status.AvailableReplicas) + roInfo.ActualWeight = fmt.Sprintf("%d", (rs.Available*weightutil.MaxTrafficWeight(ro))/ro.Status.AvailableReplicas) } } } else { @@ -82,16 +85,23 @@ func NewRolloutInfo( roInfo.Containers = []*rollout.ContainerInfo{} var containerList []corev1.Container + var initContainerList []corev1.Container if workloadRef != nil { containerList = workloadRef.Spec.Template.Spec.Containers + initContainerList = workloadRef.Spec.Template.Spec.InitContainers } else { containerList = ro.Spec.Template.Spec.Containers + initContainerList = ro.Spec.Template.Spec.InitContainers } for _, c := range containerList { roInfo.Containers = append(roInfo.Containers, &rollout.ContainerInfo{Name: c.Name, Image: c.Image}) } + for _, c := range initContainerList { + roInfo.InitContainers = append(roInfo.InitContainers, &rollout.ContainerInfo{Name: c.Name, Image: c.Image}) + } + if ro.Status.RestartedAt != nil { roInfo.RestartedAt = ro.Status.RestartedAt.String() } else { diff --git a/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go b/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go index 157f148d5f..79dc728428 100644 --- a/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go +++ b/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller.go @@ -3,6 +3,7 @@ package viewcontroller import ( "context" "reflect" + "sync" "time" "github.com/argoproj/argo-rollouts/utils/queue" @@ -11,7 +12,6 @@ import ( v1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" appslisters "k8s.io/client-go/listers/apps/v1" @@ -32,7 +32,7 @@ type viewController struct { name string namespace string - kubeInformerFactory informers.SharedInformerFactory + 
kubeInformerFactory kubeinformers.SharedInformerFactory rolloutsInformerFactory rolloutinformers.SharedInformerFactory replicaSetLister appslisters.ReplicaSetNamespaceLister @@ -45,9 +45,11 @@ type viewController struct { cacheSyncs []cache.InformerSynced workqueue workqueue.RateLimitingInterface - prevObj interface{} - getObj func() (interface{}, error) - callbacks []func(interface{}) + prevObj any + getObj func() (any, error) + callbacks []func(any) + // acquire 'callbacksLock' before reading/writing to 'callbacks' + callbacksLock sync.Mutex } type RolloutViewController struct { @@ -71,7 +73,7 @@ func NewRolloutViewController(namespace string, name string, kubeClient kubernet rvc := RolloutViewController{ viewController: vc, } - vc.getObj = func() (interface{}, error) { + vc.getObj = func() (any, error) { return rvc.GetRolloutInfo() } return &rvc @@ -82,7 +84,7 @@ func NewExperimentViewController(namespace string, name string, kubeClient kuber evc := ExperimentViewController{ viewController: vc, } - vc.getObj = func() (interface{}, error) { + vc.getObj = func() (any, error) { return evc.GetExperimentInfo() } return &evc @@ -114,13 +116,13 @@ func newViewController(namespace string, name string, kubeClient kubernetes.Inte ) enqueueRolloutHandlerFuncs := cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controller.workqueue.Add(controller.name) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { controller.workqueue.Add(controller.name) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controller.workqueue.Add(controller.name) }, } @@ -164,7 +166,13 @@ func (c *viewController) processNextWorkItem() bool { return true } if !reflect.DeepEqual(c.prevObj, newObj) { - for _, cb := range c.callbacks { + + // Acquire the mutex and make a thread-local copy of the list of callbacks + c.callbacksLock.Lock() + callbacks := append(make([]func(any), 0), c.callbacks...) 
+ c.callbacksLock.Unlock() + + for _, cb := range callbacks { cb(newObj) } c.prevObj = newObj @@ -173,6 +181,9 @@ func (c *viewController) processNextWorkItem() bool { } func (c *viewController) DeregisterCallbacks() { + c.callbacksLock.Lock() + defer c.callbacksLock.Unlock() + c.callbacks = nil } @@ -215,9 +226,12 @@ func (c *RolloutViewController) GetRolloutInfo() (*rollout.RolloutInfo, error) { } func (c *RolloutViewController) RegisterCallback(callback RolloutInfoCallback) { - cb := func(i interface{}) { + cb := func(i any) { callback(i.(*rollout.RolloutInfo)) } + c.callbacksLock.Lock() + defer c.callbacksLock.Unlock() + c.callbacks = append(c.callbacks, cb) } @@ -243,8 +257,10 @@ func (c *ExperimentViewController) GetExperimentInfo() (*rollout.ExperimentInfo, } func (c *ExperimentViewController) RegisterCallback(callback ExperimentInfoCallback) { - cb := func(i interface{}) { + cb := func(i any) { callback(i.(*rollout.ExperimentInfo)) } + c.callbacksLock.Lock() + defer c.callbacksLock.Unlock() c.callbacks = append(c.callbacks, cb) } diff --git a/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller_test.go b/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller_test.go index 90752629c2..7e1dd9c30e 100644 --- a/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller_test.go +++ b/pkg/kubectl-argo-rollouts/viewcontroller/viewcontroller_test.go @@ -2,6 +2,7 @@ package viewcontroller import ( "context" + "sync" "testing" "time" @@ -53,7 +54,11 @@ func TestRolloutControllerCallback(t *testing.T) { } callbackCalled := false + var callbackCalledLock sync.Mutex // acquire before accessing callbackCalled + cb := func(roInfo *rollout.RolloutInfo) { + callbackCalledLock.Lock() + defer callbackCalledLock.Unlock() callbackCalled = true assert.Equal(t, roInfo.ObjectMeta.Name, "foo") } @@ -67,11 +72,16 @@ func TestRolloutControllerCallback(t *testing.T) { go c.Run(ctx) time.Sleep(time.Second) for i := 0; i < 100; i++ { - if callbackCalled { + callbackCalledLock.Lock() + 
isCallbackCalled := callbackCalled + callbackCalledLock.Unlock() + if isCallbackCalled { break } time.Sleep(10 * time.Millisecond) } + callbackCalledLock.Lock() + defer callbackCalledLock.Unlock() assert.True(t, callbackCalled) } @@ -100,8 +110,11 @@ func TestExperimentControllerCallback(t *testing.T) { }, } + var callbackCalledLock sync.Mutex // acquire before accessing callbackCalled callbackCalled := false cb := func(expInfo *rollout.ExperimentInfo) { + callbackCalledLock.Lock() + defer callbackCalledLock.Unlock() callbackCalled = true assert.Equal(t, expInfo.ObjectMeta.Name, "foo") } @@ -115,10 +128,15 @@ func TestExperimentControllerCallback(t *testing.T) { go c.Run(ctx) time.Sleep(time.Second) for i := 0; i < 100; i++ { - if callbackCalled { + callbackCalledLock.Lock() + isCallbackCalled := callbackCalled + callbackCalledLock.Unlock() + if isCallbackCalled { break } time.Sleep(10 * time.Millisecond) } + callbackCalledLock.Lock() + defer callbackCalledLock.Unlock() assert.True(t, callbackCalled) } diff --git a/rollout/analysis.go b/rollout/analysis.go index 49583287f6..7bb0d47f1f 100644 --- a/rollout/analysis.go +++ b/rollout/analysis.go @@ -313,7 +313,7 @@ func (c *rolloutContext) reconcilePostPromotionAnalysisRun() (*v1alpha1.Analysis func (c *rolloutContext) reconcileBackgroundAnalysisRun() (*v1alpha1.AnalysisRun, error) { currentAr := c.currentArs.CanaryBackground - if c.rollout.Spec.Strategy.Canary.Analysis == nil { + if c.rollout.Spec.Strategy.Canary.Analysis == nil || len(c.rollout.Spec.Strategy.Canary.Analysis.Templates) == 0 { err := c.cancelAnalysisRuns([]*v1alpha1.AnalysisRun{currentAr}) return nil, err } @@ -431,51 +431,79 @@ func (c *rolloutContext) newAnalysisRunFromRollout(rolloutAnalysis *v1alpha1.Rol name := strings.Join(nameParts, "-") var run *v1alpha1.AnalysisRun var err error + templates, clusterTemplates, err := c.getAnalysisTemplatesFromRefs(&rolloutAnalysis.Templates) + if err != nil { + return nil, err + } + runLabels := labels + for 
k, v := range rolloutAnalysis.AnalysisRunMetadata.Labels { + runLabels[k] = v + } + + for k, v := range c.rollout.Spec.Selector.MatchLabels { + runLabels[k] = v + } + + runAnnotations := map[string]string{ + annotations.RevisionAnnotation: revision, + } + for k, v := range rolloutAnalysis.AnalysisRunMetadata.Annotations { + runAnnotations[k] = v + } + run, err = analysisutil.NewAnalysisRunFromTemplates(templates, clusterTemplates, args, rolloutAnalysis.DryRun, rolloutAnalysis.MeasurementRetention, + runLabels, runAnnotations, name, "", c.rollout.Namespace) + if err != nil { + return nil, err + } + run.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(c.rollout, controllerKind)} + return run, nil +} + +func (c *rolloutContext) getAnalysisTemplatesFromRefs(templateRefs *[]v1alpha1.AnalysisTemplateRef) ([]*v1alpha1.AnalysisTemplate, []*v1alpha1.ClusterAnalysisTemplate, error) { templates := make([]*v1alpha1.AnalysisTemplate, 0) clusterTemplates := make([]*v1alpha1.ClusterAnalysisTemplate, 0) - for _, templateRef := range rolloutAnalysis.Templates { + for _, templateRef := range *templateRefs { if templateRef.ClusterScope { template, err := c.clusterAnalysisTemplateLister.Get(templateRef.TemplateName) if err != nil { if k8serrors.IsNotFound(err) { c.log.Warnf("ClusterAnalysisTemplate '%s' not found", templateRef.TemplateName) } - return nil, err + return nil, nil, err } clusterTemplates = append(clusterTemplates, template) + // Look for nested templates + if template.Spec.Templates != nil { + innerTemplates, innerClusterTemplates, innerErr := c.getAnalysisTemplatesFromRefs(&template.Spec.Templates) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) 
+ } } else { template, err := c.analysisTemplateLister.AnalysisTemplates(c.rollout.Namespace).Get(templateRef.TemplateName) if err != nil { if k8serrors.IsNotFound(err) { c.log.Warnf("AnalysisTemplate '%s' not found", templateRef.TemplateName) } - return nil, err + return nil, nil, err } templates = append(templates, template) + // Look for nested templates + if template.Spec.Templates != nil { + innerTemplates, innerClusterTemplates, innerErr := c.getAnalysisTemplatesFromRefs(&template.Spec.Templates) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) + } } } - run, err = analysisutil.NewAnalysisRunFromTemplates(templates, clusterTemplates, args, rolloutAnalysis.DryRun, rolloutAnalysis.MeasurementRetention, name, "", c.rollout.Namespace) - if err != nil { - return nil, err - } - run.Labels = labels - for k, v := range rolloutAnalysis.AnalysisRunMetadata.Labels { - run.Labels[k] = v - } - - for k, v := range c.rollout.Spec.Selector.MatchLabels { - run.Labels[k] = v - } - - run.Annotations = map[string]string{ - annotations.RevisionAnnotation: revision, - } - for k, v := range rolloutAnalysis.AnalysisRunMetadata.Annotations { - run.Annotations[k] = v - } - run.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(c.rollout, controllerKind)} - return run, nil + uniqueTemplates, uniqueClusterTemplates := analysisutil.FilterUniqueTemplates(templates, clusterTemplates) + return uniqueTemplates, uniqueClusterTemplates, nil } func (c *rolloutContext) deleteAnalysisRuns(ars []*v1alpha1.AnalysisRun) error { diff --git a/rollout/analysis_test.go b/rollout/analysis_test.go index 624134fbb6..46a741bd63 100644 --- a/rollout/analysis_test.go +++ b/rollout/analysis_test.go @@ -43,19 +43,102 @@ func analysisTemplate(name string) *v1alpha1.AnalysisTemplate { } } -func clusterAnalysisTemplate(name string) *v1alpha1.ClusterAnalysisTemplate { 
+func analysisTemplateWithNamespacedAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + return analysisTemplateWithAnalysisRefs(name, false, innerRefsName...) +} + +func analysisTemplateWithClusterAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + return analysisTemplateWithAnalysisRefs(name, true, innerRefsName...) +} + +func analysisTemplateWithAnalysisRefs(name string, clusterScope bool, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + templatesRefs := []v1alpha1.AnalysisTemplateRef{} + for _, innerTplName := range innerRefsName { + templatesRefs = append(templatesRefs, v1alpha1.AnalysisTemplateRef{ + TemplateName: innerTplName, + ClusterScope: clusterScope, + }) + } + return &v1alpha1.AnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{{ + Name: "example-" + name, + }}, + DryRun: []v1alpha1.DryRun{{ + MetricName: "example-" + name, + }}, + MeasurementRetention: []v1alpha1.MeasurementRetention{{ + MetricName: "example-" + name, + }}, + Templates: templatesRefs, + }, + } +} + +func analysisTemplateWithOnlyNamespacedAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + return analysisTemplateWithOnlyRefs(name, false, innerRefsName...) 
+} + +func analysisTemplateWithOnlyRefs(name string, clusterScope bool, innerRefsName ...string) *v1alpha1.AnalysisTemplate { + templatesRefs := []v1alpha1.AnalysisTemplateRef{} + for _, innerTplName := range innerRefsName { + templatesRefs = append(templatesRefs, v1alpha1.AnalysisTemplateRef{ + TemplateName: innerTplName, + ClusterScope: clusterScope, + }) + } + return &v1alpha1.AnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{}, + DryRun: []v1alpha1.DryRun{}, + MeasurementRetention: []v1alpha1.MeasurementRetention{}, + Templates: templatesRefs, + }, + } +} + +func clusterAnalysisTemplate(name string, metricName string) *v1alpha1.ClusterAnalysisTemplate { return &v1alpha1.ClusterAnalysisTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: v1alpha1.AnalysisTemplateSpec{ Metrics: []v1alpha1.Metric{{ - Name: "clusterexample", + Name: metricName, }}, }, } } +func clusterAnalysisTemplateWithAnalysisRefs(name string, innerRefsName ...string) *v1alpha1.ClusterAnalysisTemplate { + templatesRefs := []v1alpha1.AnalysisTemplateRef{} + for _, innerTplName := range innerRefsName { + templatesRefs = append(templatesRefs, v1alpha1.AnalysisTemplateRef{ + TemplateName: innerTplName, + ClusterScope: true, + }) + } + return &v1alpha1.ClusterAnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: v1alpha1.AnalysisTemplateSpec{ + Metrics: []v1alpha1.Metric{{ + Name: "clusterexample-" + name, + }}, + Templates: templatesRefs, + }, + } +} + func clusterAnalysisRun(cat *v1alpha1.ClusterAnalysisTemplate, analysisRunType string, r *v1alpha1.Rollout) *v1alpha1.AnalysisRun { labels := map[string]string{} podHash := hash.ComputePodTemplateHash(&r.Spec.Template, r.Status.CollisionCount) @@ -133,7 +216,7 @@ func TestCreateBackgroundAnalysisRun(t *testing.T) { ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) 
r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -196,7 +279,7 @@ func TestCreateBackgroundAnalysisRunWithTemplates(t *testing.T) { ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, }, @@ -251,13 +334,13 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplates(t *testing.T) { steps := []v1alpha1.CanaryStep{{ SetWeight: int32Ptr(10), }} - cat := clusterAnalysisTemplate("bar") + cat := clusterAnalysisTemplate("bar", "clusterexample") r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r2 := bumpVersion(r1) ar := clusterAnalysisRun(cat, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: cat.Name, ClusterScope: true, }}, @@ -313,7 +396,7 @@ func TestInvalidSpecMissingClusterTemplatesBackgroundAnalysis(t *testing.T) { r := newCanaryRollout("foo", 10, nil, nil, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "missing", ClusterScope: true, }}, @@ -350,7 +433,7 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplate(t *testing.T SetWeight: int32Ptr(10), }} at := analysisTemplate("bar") - cat := 
clusterAnalysisTemplate("clusterbar") + cat := clusterAnalysisTemplate("clusterbar", "clusterexample") r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r2 := bumpVersion(r1) @@ -367,7 +450,7 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplate(t *testing.T } r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: cat.Name, ClusterScope: true, }, { @@ -419,6 +502,169 @@ func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplate(t *testing.T assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch) } +func TestCreateBackgroundAnalysisRunWithClusterTemplatesAndTemplateAndInnerTemplates(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{{ + SetWeight: int32Ptr(10), + }} + at := analysisTemplateWithNamespacedAnalysisRefs("bar", "bar2") + at2 := analysisTemplateWithClusterAnalysisRefs("bar2", "clusterbar2", "clusterbar4") + cat := clusterAnalysisTemplateWithAnalysisRefs("clusterbar", "clusterbar2", "clusterbar3") + cat2 := clusterAnalysisTemplate("clusterbar2", "clusterexample-clusterbar2") + cat3 := clusterAnalysisTemplate("clusterbar3", "clusterexample-clusterbar3") + cat4 := clusterAnalysisTemplate("clusterbar4", "clusterexample-clusterbar4") + r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r2 := bumpVersion(r1) + + ar := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run1", + Namespace: metav1.NamespaceDefault, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(r1, controllerKind)}, + }, + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: concatMultipleSlices([][]v1alpha1.Metric{at.Spec.Metrics, at2.Spec.Metrics, cat.Spec.Metrics, cat2.Spec.Metrics, 
cat3.Spec.Metrics, cat4.Spec.Metrics}), + Args: at.Spec.Args, + }, + } + r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ + RolloutAnalysis: v1alpha1.RolloutAnalysis{ + Templates: []v1alpha1.AnalysisTemplateRef{{ + TemplateName: cat.Name, + ClusterScope: true, + }, { + TemplateName: at.Name, + }}, + }, + } + rs1 := newReplicaSetWithStatus(r1, 10, 10) + rs2 := newReplicaSetWithStatus(r2, 0, 0) + f.kubeobjects = append(f.kubeobjects, rs1, rs2) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + + r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 10, 0, 10, false) + progressingCondition, _ := newProgressingCondition(conditions.ReplicaSetUpdatedReason, rs2, "") + conditions.SetRolloutCondition(&r2.Status, progressingCondition) + availableCondition, _ := newAvailableCondition(true) + conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + + f.rolloutLister = append(f.rolloutLister, r2) + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, cat, cat2, cat3, cat4) + f.analysisTemplateLister = append(f.analysisTemplateLister, at, at2) + f.objects = append(f.objects, r2, cat, at, at2, cat2, cat3, cat4) + + createdIndex := f.expectCreateAnalysisRunAction(ar) + f.expectUpdateReplicaSetAction(rs2) + index := f.expectPatchRolloutAction(r1) + + f.run(getKey(r2, t)) + createdAr := f.getCreatedAnalysisRun(createdIndex) + expectedArName := fmt.Sprintf("%s-%s-%s", r2.Name, rs2PodHash, "2") + assert.Equal(t, expectedArName, createdAr.Name) + assert.Len(t, createdAr.Spec.Metrics, 6) + + patch := f.getPatchedRollout(index) + expectedPatch := `{ + "status": { + "canary": { + "currentBackgroundAnalysisRunStatus": { + "name": "%s", + "status": "" + } + } + } + }` + assert.JSONEq(t, 
calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch) +} + +// Test the case where the analysis template does't have metrics, but refences other templates +func TestCreateBackgroundAnalysisRunWithTemplatesAndNoMetrics(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{{ + SetWeight: int32Ptr(10), + }} + at := analysisTemplateWithOnlyNamespacedAnalysisRefs("bar", "bar2") + at2 := analysisTemplateWithClusterAnalysisRefs("bar2", "clusterbar2", "clusterbar4") + cat := clusterAnalysisTemplateWithAnalysisRefs("clusterbar", "clusterbar2", "clusterbar3") + cat2 := clusterAnalysisTemplate("clusterbar2", "clusterexample-clusterbar2") + cat3 := clusterAnalysisTemplate("clusterbar3", "clusterexample-clusterbar3") + cat4 := clusterAnalysisTemplate("clusterbar4", "clusterexample-clusterbar4") + r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r2 := bumpVersion(r1) + + ar := &v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "run1", + Namespace: metav1.NamespaceDefault, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(r1, controllerKind)}, + }, + Spec: v1alpha1.AnalysisRunSpec{ + Metrics: concatMultipleSlices([][]v1alpha1.Metric{at.Spec.Metrics, at2.Spec.Metrics, cat.Spec.Metrics, cat2.Spec.Metrics, cat3.Spec.Metrics, cat4.Spec.Metrics}), + Args: at.Spec.Args, + }, + } + r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ + RolloutAnalysis: v1alpha1.RolloutAnalysis{ + Templates: []v1alpha1.AnalysisTemplateRef{{ + TemplateName: cat.Name, + ClusterScope: true, + }, { + TemplateName: at.Name, + }}, + }, + } + rs1 := newReplicaSetWithStatus(r1, 10, 10) + rs2 := newReplicaSetWithStatus(r2, 0, 0) + f.kubeobjects = append(f.kubeobjects, rs1, rs2) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := 
rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + + r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 10, 0, 10, false) + progressingCondition, _ := newProgressingCondition(conditions.ReplicaSetUpdatedReason, rs2, "") + conditions.SetRolloutCondition(&r2.Status, progressingCondition) + availableCondition, _ := newAvailableCondition(true) + conditions.SetRolloutCondition(&r2.Status, availableCondition) + completedCondition, _ := newCompletedCondition(false) + conditions.SetRolloutCondition(&r2.Status, completedCondition) + + f.rolloutLister = append(f.rolloutLister, r2) + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, cat, cat2, cat3, cat4) + f.analysisTemplateLister = append(f.analysisTemplateLister, at, at2) + f.objects = append(f.objects, r2, cat, at, at2, cat2, cat3, cat4) + + createdIndex := f.expectCreateAnalysisRunAction(ar) + f.expectUpdateReplicaSetAction(rs2) + index := f.expectPatchRolloutAction(r1) + + f.run(getKey(r2, t)) + createdAr := f.getCreatedAnalysisRun(createdIndex) + expectedArName := fmt.Sprintf("%s-%s-%s", r2.Name, rs2PodHash, "2") + assert.Equal(t, expectedArName, createdAr.Name) + assert.Len(t, createdAr.Spec.Metrics, 5) + + patch := f.getPatchedRollout(index) + expectedPatch := `{ + "status": { + "canary": { + "currentBackgroundAnalysisRunStatus": { + "name": "%s", + "status": "" + } + } + } + }` + assert.JSONEq(t, calculatePatch(r2, fmt.Sprintf(expectedPatch, expectedArName)), patch) +} + // TestCreateAnalysisRunWithCollision ensures we will create an new analysis run with a new name // when there is a conflict (e.g. 
such as when there is a retry) func TestCreateAnalysisRunWithCollision(t *testing.T) { @@ -434,7 +680,7 @@ func TestCreateAnalysisRunWithCollision(t *testing.T) { ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -505,7 +751,7 @@ func TestCreateAnalysisRunWithCollisionAndSemanticEquality(t *testing.T) { ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -560,7 +806,7 @@ func TestCreateAnalysisRunOnAnalysisStep(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -621,7 +867,7 @@ func TestCreateAnalysisRunOnPromotedAnalysisStepIfPreviousStepWasAnalysisToo(t * at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -629,7 +875,7 @@ func TestCreateAnalysisRunOnPromotedAnalysisStepIfPreviousStepWasAnalysisToo(t * }, }, { Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -698,7 +944,7 @@ func TestFailCreateStepAnalysisRunIfInvalidTemplateRef(t *testing.T) { steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: 
[]v1alpha1.AnalysisTemplateRef{ { TemplateName: "bad-template", }, @@ -749,7 +995,7 @@ func TestFailCreateBackgroundAnalysisRunIfInvalidTemplateRef(t *testing.T) { r := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: "bad-template", }, @@ -788,23 +1034,25 @@ func TestFailCreateBackgroundAnalysisRunIfMetricRepeated(t *testing.T) { }} at := analysisTemplate("bad-template") + at2 := analysisTemplate("bad-template-2") at.Spec.Metrics = append(at.Spec.Metrics, at.Spec.Metrics[0]) - f.analysisTemplateLister = append(f.analysisTemplateLister, at) + at2.Spec.Metrics = append(at2.Spec.Metrics, at2.Spec.Metrics[0]) + f.analysisTemplateLister = append(f.analysisTemplateLister, at, at2) r := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, { - TemplateName: at.Name, + TemplateName: at2.Name, }, }, }, } f.rolloutLister = append(f.rolloutLister, r) - f.objects = append(f.objects, r, at) + f.objects = append(f.objects, r, at, at2) patchIndex := f.expectPatchRolloutAction(r) f.run(getKey(r, t)) @@ -816,7 +1064,7 @@ func TestFailCreateBackgroundAnalysisRunIfMetricRepeated(t *testing.T) { "message": "InvalidSpec: %s" } }` - errmsg := "The Rollout \"foo\" is invalid: spec.strategy.canary.analysis.templates: Invalid value: \"templateNames: [bad-template bad-template]\": two metrics have the same name 'example'" + errmsg := "The Rollout \"foo\" is invalid: spec.strategy.canary.analysis.templates: Invalid value: \"templateNames: 
[bad-template bad-template-2]\": two metrics have the same name 'example'" _, progressingCond := newProgressingCondition(conditions.ReplicaSetUpdatedReason, r, "") invalidSpecCond := conditions.NewRolloutCondition(v1alpha1.InvalidSpec, corev1.ConditionTrue, conditions.InvalidSpecReason, errmsg) invalidSpecBytes, _ := json.Marshal(invalidSpecCond) @@ -839,7 +1087,7 @@ func TestDoNothingWithAnalysisRunsWhileBackgroundAnalysisRunRunning(t *testing.T r2 := bumpVersion(r1) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -886,7 +1134,7 @@ func TestDoNothingWhileStepBasedAnalysisRunRunning(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -935,7 +1183,7 @@ func TestCancelOlderAnalysisRuns(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1003,7 +1251,7 @@ func TestDeleteAnalysisRunsWithNoMatchingRS(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1059,7 +1307,7 @@ func TestDeleteAnalysisRunsAfterRSDelete(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1116,7 +1364,7 @@ func TestIncrementStepAfterSuccessfulAnalysisRun(t 
*testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1180,7 +1428,7 @@ func TestPausedOnInconclusiveBackgroundAnalysisRun(t *testing.T) { ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1240,7 +1488,7 @@ func TestPausedStepAfterInconclusiveAnalysisRun(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1303,7 +1551,7 @@ func TestErrorConditionAfterErrorAnalysisRunStep(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1378,7 +1626,7 @@ func TestErrorConditionAfterErrorAnalysisRunBackground(t *testing.T) { r2 := bumpVersion(r1) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1446,7 +1694,7 @@ func TestCancelAnalysisRunsWhenAborted(t *testing.T) { at := analysisTemplate("bar") steps := []v1alpha1.CanaryStep{{ Analysis: &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1513,7 +1761,7 @@ func TestCancelBackgroundAnalysisRunWhenRolloutIsCompleted(t *testing.T) { r2 := 
bumpVersion(r1) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1559,7 +1807,7 @@ func TestDoNotCreateBackgroundAnalysisRunAfterInconclusiveRun(t *testing.T) { r2 := bumpVersion(r1) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1614,7 +1862,7 @@ func TestDoNotCreateBackgroundAnalysisRunOnNewCanaryRollout(t *testing.T) { r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r1.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1649,7 +1897,7 @@ func TestDoNotCreateBackgroundAnalysisRunOnNewCanaryRolloutStableRSEmpty(t *test r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) r1.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -1679,7 +1927,7 @@ func TestCreatePrePromotionAnalysisRun(t *testing.T) { r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -1737,7 +1985,7 @@ func TestDoNotCreatePrePromotionAnalysisAfterPromotionRollout(t *testing.T) { r1 := 
newBlueGreenRollout("foo", 1, nil, "bar", "") r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "test", }}, } @@ -1786,7 +2034,7 @@ func TestDoNotCreatePrePromotionAnalysisRunOnNewRollout(t *testing.T) { r := newBlueGreenRollout("foo", 1, nil, "active", "") r.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "test", }}, } @@ -1819,7 +2067,7 @@ func TestDoNotCreatePrePromotionAnalysisRunOnNotReadyReplicaSet(t *testing.T) { r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "test", }}, } @@ -1861,7 +2109,7 @@ func TestRolloutPrePromotionAnalysisBecomesInconclusive(t *testing.T) { r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -1931,7 +2179,7 @@ func TestRolloutPrePromotionAnalysisSwitchServiceAfterSuccess(t *testing.T) { r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(true) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -1996,7 +2244,7 @@ func TestRolloutPrePromotionAnalysisHonorAutoPromotionSeconds(t *testing.T) { r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.AutoPromotionSeconds = 10 
r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2061,7 +2309,7 @@ func TestRolloutPrePromotionAnalysisDoNothingOnInconclusiveAnalysis(t *testing.T r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2115,7 +2363,7 @@ func TestAbortRolloutOnErrorPrePromotionAnalysis(t *testing.T) { r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2185,7 +2433,7 @@ func TestCreatePostPromotionAnalysisRun(t *testing.T) { r1 := newBlueGreenRollout("foo", 1, nil, "active", "") r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PostPromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2232,7 +2480,7 @@ func TestRolloutPostPromotionAnalysisSuccess(t *testing.T) { r1 := newBlueGreenRollout("foo", 1, nil, "active", "") r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PostPromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2290,7 +2538,7 @@ func TestPostPromotionAnalysisRunHandleInconclusive(t *testing.T) { r1 := newBlueGreenRollout("foo", 1, nil, "active", "") r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PostPromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: 
[]v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2353,7 +2601,7 @@ func TestAbortRolloutOnErrorPostPromotionAnalysis(t *testing.T) { r1 := newBlueGreenRollout("foo", 1, nil, "active", "") r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PostPromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: at.Name, }}, } @@ -2430,7 +2678,7 @@ func TestCreateAnalysisRunWithCustomAnalysisRunMetadataAndROCopyLabels(t *testin ar := analysisRun(at, v1alpha1.RolloutTypeBackgroundRunLabel, r2) r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -2468,3 +2716,63 @@ func TestCreateAnalysisRunWithCustomAnalysisRunMetadataAndROCopyLabels(t *testin assert.Equal(t, "testLabelValue", createdAr.Labels["testLabelKey"]) assert.Equal(t, "1234", createdAr.Labels["my-label"]) } + +func TestCancelBackgroundAnalysisRunWhenRolloutAnalysisHasNoTemplate(t *testing.T) { + f := newFixture(t) + defer f.Close() + + at := analysisTemplate("bar") + steps := []v1alpha1.CanaryStep{ + {SetWeight: pointer.Int32Ptr(10)}, + } + + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(0), intstr.FromInt(1)) + rs1 := newReplicaSetWithStatus(r1, 1, 1) + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + r1 = updateCanaryRolloutStatus(r1, rs1PodHash, 1, 1, 1, false) + ar := analysisRun(at, v1alpha1.RolloutTypeStepLabel, r1) + r1.Status.Canary.CurrentBackgroundAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{ + Name: ar.Name, + Status: v1alpha1.AnalysisPhaseRunning, + } + + r2 := bumpVersion(r1) + r2.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ + RolloutAnalysis: v1alpha1.RolloutAnalysis{}, // No 
templates provided. + } + rs2 := newReplicaSetWithStatus(r2, 0, 0) + + f.kubeobjects = append(f.kubeobjects, rs1, rs2) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + f.rolloutLister = append(f.rolloutLister, r2) + f.analysisTemplateLister = append(f.analysisTemplateLister, at) + f.analysisRunLister = append(f.analysisRunLister, ar) + f.objects = append(f.objects, r2, at, ar) + + _ = f.expectPatchAnalysisRunAction(ar) + patchIndex := f.expectPatchRolloutAction(r2) + _ = f.expectUpdateReplicaSetAction(rs1) + f.run(getKey(r2, t)) + + patch := f.getPatchedRollout(patchIndex) + + assert.Contains(t, patch, `"currentBackgroundAnalysisRunStatus":null`) +} + +func concatMultipleSlices[T any](slices [][]T) []T { + var totalLen int + + for _, s := range slices { + totalLen += len(s) + } + + result := make([]T, totalLen) + + var i int + + for _, s := range slices { + i += copy(result[i:], s) + } + + return result +} diff --git a/rollout/bluegreen.go b/rollout/bluegreen.go index f1bcf7a7bb..9904de9dab 100644 --- a/rollout/bluegreen.go +++ b/rollout/bluegreen.go @@ -1,6 +1,7 @@ package rollout import ( + "fmt" "math" "sort" @@ -22,7 +23,7 @@ func (c *rolloutContext) rolloutBlueGreen() error { } c.newRS, err = c.getAllReplicaSetsAndSyncRevision(true) if err != nil { - return err + return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in rolloutBlueGreen create true: %w", err) } // This must happen right after the new replicaset is created @@ -82,6 +83,9 @@ func (c *rolloutContext) reconcileBlueGreenStableReplicaSet(activeSvc *corev1.Se c.log.Infof("Reconciling stable ReplicaSet '%s'", activeRS.Name) _, _, err := c.scaleReplicaSetAndRecordEvent(activeRS, defaults.GetReplicasOrDefault(c.rollout.Spec.Replicas)) + if err != nil { + return fmt.Errorf("failed to scaleReplicaSetAndRecordEvent in reconcileBlueGreenStableReplicaSet: %w", err) + } return err } @@ -243,7 +247,7 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForBlueGreen(oldRSs []*appsv1.Re // 
Scale down. _, _, err = c.scaleReplicaSetAndRecordEvent(targetRS, desiredReplicaCount) if err != nil { - return false, err + return false, fmt.Errorf("failed to scaleReplicaSetAndRecordEvent in scaleDownOldReplicaSetsForBlueGreen: %w", err) } hasScaled = true } diff --git a/rollout/bluegreen_test.go b/rollout/bluegreen_test.go index cff894e8ca..2ff9515fe3 100644 --- a/rollout/bluegreen_test.go +++ b/rollout/bluegreen_test.go @@ -428,7 +428,7 @@ func TestBlueGreenHandlePause(t *testing.T) { r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r2 := bumpVersion(r1) r2.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "test", }}, } diff --git a/rollout/canary.go b/rollout/canary.go index b443db507e..a3033fea02 100644 --- a/rollout/canary.go +++ b/rollout/canary.go @@ -1,6 +1,7 @@ package rollout import ( + "fmt" "sort" appsv1 "k8s.io/api/apps/v1" @@ -21,14 +22,14 @@ func (c *rolloutContext) rolloutCanary() error { if replicasetutil.PodTemplateOrStepsChanged(c.rollout, c.newRS) { c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false) if err != nil { - return err + return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in rolloutCanary with PodTemplateOrStepsChanged: %w", err) } return c.syncRolloutStatusCanary() } c.newRS, err = c.getAllReplicaSetsAndSyncRevision(true) if err != nil { - return err + return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in rolloutCanary create true: %w", err) } err = c.podRestarter.Reconcile(c) @@ -110,6 +111,9 @@ func (c *rolloutContext) reconcileCanaryStableReplicaSet() (bool, error) { _, desiredStableRSReplicaCount = replicasetutil.CalculateReplicaCountsForTrafficRoutedCanary(c.rollout, c.rollout.Status.Canary.Weights) } scaled, _, err := c.scaleReplicaSetAndRecordEvent(c.stableRS, desiredStableRSReplicaCount) + if err != nil { + return scaled, fmt.Errorf("failed to 
scaleReplicaSetAndRecordEvent in reconcileCanaryStableReplicaSet: %w", err) + } return scaled, err } @@ -180,7 +184,7 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli annotationedRSs := int32(0) for _, targetRS := range oldRSs { - if c.isReplicaSetReferenced(targetRS) { + if c.rollout.Spec.Strategy.Canary.TrafficRouting != nil && c.isReplicaSetReferenced(targetRS) { // We might get here if user interrupted an an update in order to move back to stable. c.log.Infof("Skip scale down of older RS '%s': still referenced", targetRS.Name) continue @@ -230,7 +234,7 @@ func (c *rolloutContext) scaleDownOldReplicaSetsForCanary(oldRSs []*appsv1.Repli // Scale down. _, _, err = c.scaleReplicaSetAndRecordEvent(targetRS, desiredReplicaCount) if err != nil { - return totalScaledDown, err + return totalScaledDown, fmt.Errorf("failed to scaleReplicaSetAndRecordEvent in scaleDownOldReplicaSetsForCanary: %w", err) } scaleDownCount := *targetRS.Spec.Replicas - desiredReplicaCount maxScaleDown -= scaleDownCount @@ -292,7 +296,7 @@ func (c *rolloutContext) canProceedWithScaleDownAnnotation(oldRSs []*appsv1.Repl // AWS API calls. return true, nil } - stableSvcName, _ := trafficrouting.GetStableAndCanaryServices(c.rollout) + stableSvcName, _ := trafficrouting.GetStableAndCanaryServices(c.rollout, true) stableSvc, err := c.servicesLister.Services(c.rollout.Namespace).Get(stableSvcName) if err != nil { return false, err @@ -435,6 +439,13 @@ func (c *rolloutContext) reconcileCanaryReplicaSets() (bool, error) { return true, nil } + // If we have updated both the replica count and the pod template hash c.newRS will be nil we want to reconcile the newRS so we look at the + // rollout status to get the newRS to reconcile it. 
+ if c.newRS == nil && c.rollout.Status.CurrentPodHash != c.rollout.Status.StableRS { + rs, _ := replicasetutil.GetReplicaSetByTemplateHash(c.allRSs, c.rollout.Status.CurrentPodHash) + c.newRS = rs + } + scaledNewRS, err := c.reconcileNewReplicaSet() if err != nil { return false, err diff --git a/rollout/canary_test.go b/rollout/canary_test.go index b275170eec..3d1eec9d14 100644 --- a/rollout/canary_test.go +++ b/rollout/canary_test.go @@ -1,12 +1,19 @@ package rollout import ( + "context" "encoding/json" "fmt" + "os" "strconv" "testing" "time" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + k8stesting "k8s.io/client-go/testing" + "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/apps/v1" @@ -23,6 +30,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" "github.com/argoproj/argo-rollouts/utils/hash" + ingressutil "github.com/argoproj/argo-rollouts/utils/ingress" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" @@ -549,6 +557,92 @@ func TestCanaryRolloutCreateFirstReplicasetWithSteps(t *testing.T) { assert.JSONEq(t, calculatePatch(r, expectedPatch), patch) } +func TestCanaryRolloutWithMaxWeightInTrafficRouting(t *testing.T) { + testCases := []struct { + name string + maxWeight *int32 + setWeight int32 + expectedCreatedReplicas int32 + expectedUpdatedReplicas int32 + }{ + { + name: "max weight 100", + maxWeight: int32Ptr(100), + setWeight: 10, + expectedCreatedReplicas: 0, + expectedUpdatedReplicas: 1, + }, + { + name: "max weight 1000", + maxWeight: int32Ptr(1000), + setWeight: 200, + expectedCreatedReplicas: 0, + expectedUpdatedReplicas: 2, + }, + } + + for _, tc := range testCases { + f := newFixture(t) + defer f.Close() + steps := []v1alpha1.CanaryStep{{ + SetWeight: 
int32Ptr(tc.setWeight), + }} + r1 := newCanaryRollout("foo", 10, nil, steps, int32Ptr(0), intstr.FromInt(1), intstr.FromInt(0)) + + canarySVCName := "canary" + stableSVCName := "stable" + + ingressName := "ingress" + r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + MaxTrafficWeight: tc.maxWeight, + Nginx: &v1alpha1.NginxTrafficRouting{ + StableIngress: ingressName, + }, + } + r1.Spec.Strategy.Canary.StableService = stableSVCName + r1.Spec.Strategy.Canary.CanaryService = canarySVCName + r1.Status.StableRS = "895c6c4f9" + r2 := bumpVersion(r1) + + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + rs1 := newReplicaSetWithStatus(r1, 10, 10) + rs2 := newReplicaSetWithStatus(r2, 1, 0) + + stableSvc := newService(stableSVCName, 80, + map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, r1) + + canarySvc := newService(canarySVCName, 80, + map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, r1) + f.replicaSetLister = append(f.replicaSetLister, rs1) + + ing := newIngress(ingressName, canarySvc, stableSvc) + ing.Spec.Rules[0].HTTP.Paths[0].Backend.ServiceName = stableSVCName + f.kubeobjects = append(f.kubeobjects, rs1, canarySvc, stableSvc, ing) + f.serviceLister = append(f.serviceLister, canarySvc, stableSvc) + f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ing)) + + createdRSIndex := f.expectCreateReplicaSetAction(rs2) + updatedRSIndex := f.expectUpdateReplicaSetAction(rs2) + updatedRolloutIndex := f.expectUpdateRolloutStatusAction(r2) + f.expectPatchRolloutAction(r2) + f.run(getKey(r2, t)) + + createdRS := f.getCreatedReplicaSet(createdRSIndex) + assert.Equal(t, tc.expectedCreatedReplicas, *createdRS.Spec.Replicas) + updatedRS := f.getUpdatedReplicaSet(updatedRSIndex) + assert.Equal(t, tc.expectedUpdatedReplicas, *updatedRS.Spec.Replicas) + + updatedRollout := 
f.getUpdatedRollout(updatedRolloutIndex) + progressingCondition := conditions.GetRolloutCondition(updatedRollout.Status, v1alpha1.RolloutProgressing) + assert.NotNil(t, progressingCondition) + assert.Equal(t, conditions.NewReplicaSetReason, progressingCondition.Reason) + assert.Equal(t, corev1.ConditionTrue, progressingCondition.Status) + assert.Equal(t, fmt.Sprintf(conditions.NewReplicaSetMessage, createdRS.Name), progressingCondition.Message) + } + +} func TestCanaryRolloutCreateNewReplicaWithCorrectWeight(t *testing.T) { f := newFixture(t) defer f.Close() @@ -823,9 +917,9 @@ func TestRollBackToStable(t *testing.T) { f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) - updatedRSIndex := f.expectUpdateReplicaSetAction(rs1) - f.expectUpdateReplicaSetAction(rs1) - patchIndex := f.expectPatchRolloutAction(r2) + updatedRSIndex := f.expectUpdateReplicaSetAction(rs1) // Bump replicaset revision from 1 to 3 + f.expectUpdateRolloutAction(r2) // Bump rollout revision from 1 to 3 + patchIndex := f.expectPatchRolloutAction(r2) // Patch rollout status f.run(getKey(r2, t)) expectedRS1 := rs1.DeepCopy() @@ -869,7 +963,6 @@ func TestRollBackToActiveReplicaSetWithinWindow(t *testing.T) { f.kubeobjects = append(f.kubeobjects, rs1, rs2) f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) - f.serviceLister = append(f.serviceLister) // Switch back to version 1 r2.Spec.Template = r1.Spec.Template @@ -883,9 +976,9 @@ func TestRollBackToActiveReplicaSetWithinWindow(t *testing.T) { f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) - f.expectUpdateReplicaSetAction(rs1) - f.expectUpdateReplicaSetAction(rs1) - rolloutPatchIndex := f.expectPatchRolloutAction(r2) + f.expectUpdateReplicaSetAction(rs1) // Update replicaset revision from 1 to 3 + f.expectUpdateRolloutAction(r2) // Update rollout revision from 1 to 3 + rolloutPatchIndex := f.expectPatchRolloutAction(r2) // Patch rollout status f.run(getKey(r2, t)) expectedStepIndex 
:= len(steps) @@ -963,7 +1056,8 @@ func TestRollBackToStableAndStepChange(t *testing.T) { f.objects = append(f.objects, r2) updatedRSIndex := f.expectUpdateReplicaSetAction(rs1) - f.expectUpdateReplicaSetAction(rs1) + //f.expectUpdateReplicaSetAction(rs1) + f.expectUpdateRolloutAction(r2) patchIndex := f.expectPatchRolloutAction(r2) f.run(getKey(r2, t)) @@ -1067,6 +1161,8 @@ func TestSyncRolloutWaitAddToQueue(t *testing.T) { f.runController(key, true, false, c, i, k8sI) // When the controller starts, it will enqueue the rollout while syncing the informer and during the reconciliation step + f.enqueuedObjectsLock.Lock() + defer f.enqueuedObjectsLock.Unlock() assert.Equal(t, 2, f.enqueuedObjects[key]) } @@ -1115,6 +1211,8 @@ func TestSyncRolloutIgnoreWaitOutsideOfReconciliationPeriod(t *testing.T) { c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute }) f.runController(key, true, false, c, i, k8sI) // When the controller starts, it will enqueue the rollout so we expect the rollout to enqueue at least once. 
+ f.enqueuedObjectsLock.Lock() + defer f.enqueuedObjectsLock.Unlock() assert.Equal(t, 1, f.enqueuedObjects[key]) } @@ -1468,7 +1566,7 @@ func TestCanaryRolloutWithInvalidCanaryServiceName(t *testing.T) { patchIndex := f.expectPatchRolloutAction(rollout) f.run(getKey(rollout, t)) - patch := make(map[string]interface{}) + patch := make(map[string]any) patchData := f.getPatchedRollout(patchIndex) err := json.Unmarshal([]byte(patchData), &patch) assert.NoError(t, err) @@ -1478,7 +1576,7 @@ func TestCanaryRolloutWithInvalidCanaryServiceName(t *testing.T) { assert.True(t, ok) assert.Len(t, c, 2) - condition, ok := c[1].(map[string]interface{}) + condition, ok := c[1].(map[string]any) assert.True(t, ok) assert.Equal(t, conditions.InvalidSpecReason, condition["reason"]) assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.canaryService: Invalid value: \"invalid-canary\": service \"invalid-canary\" not found", condition["message"]) @@ -1520,7 +1618,7 @@ func TestCanaryRolloutWithInvalidStableServiceName(t *testing.T) { patchIndex := f.expectPatchRolloutAction(rollout) f.run(getKey(rollout, t)) - patch := make(map[string]interface{}) + patch := make(map[string]any) patchData := f.getPatchedRollout(patchIndex) err := json.Unmarshal([]byte(patchData), &patch) assert.NoError(t, err) @@ -1530,7 +1628,7 @@ func TestCanaryRolloutWithInvalidStableServiceName(t *testing.T) { assert.True(t, ok) assert.Len(t, c, 2) - condition, ok := c[1].(map[string]interface{}) + condition, ok := c[1].(map[string]any) assert.True(t, ok) assert.Equal(t, conditions.InvalidSpecReason, condition["reason"]) assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.stableService: Invalid value: \"invalid-stable\": service \"invalid-stable\" not found", condition["message"]) @@ -1565,13 +1663,11 @@ func TestCanaryRolloutWithInvalidPingServiceName(t *testing.T) { f.rolloutLister = append(f.rolloutLister, r) f.objects = append(f.objects, r) - f.kubeobjects = append(f.kubeobjects) 
- f.serviceLister = append(f.serviceLister) patchIndex := f.expectPatchRolloutAction(r) f.run(getKey(r, t)) - patch := make(map[string]interface{}) + patch := make(map[string]any) patchData := f.getPatchedRollout(patchIndex) err := json.Unmarshal([]byte(patchData), &patch) assert.NoError(t, err) @@ -1581,7 +1677,7 @@ func TestCanaryRolloutWithInvalidPingServiceName(t *testing.T) { assert.True(t, ok) assert.Len(t, c, 2) - condition, ok := c[1].(map[string]interface{}) + condition, ok := c[1].(map[string]any) assert.True(t, ok) assert.Equal(t, conditions.InvalidSpecReason, condition["reason"]) assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.pingPong.pingService: Invalid value: \"ping-service\": service \"ping-service\" not found", condition["message"]) @@ -1603,7 +1699,7 @@ func TestCanaryRolloutWithInvalidPongServiceName(t *testing.T) { patchIndex := f.expectPatchRolloutAction(r) f.run(getKey(r, t)) - patch := make(map[string]interface{}) + patch := make(map[string]any) patchData := f.getPatchedRollout(patchIndex) err := json.Unmarshal([]byte(patchData), &patch) assert.NoError(t, err) @@ -1613,7 +1709,7 @@ func TestCanaryRolloutWithInvalidPongServiceName(t *testing.T) { assert.True(t, ok) assert.Len(t, c, 2) - condition, ok := c[1].(map[string]interface{}) + condition, ok := c[1].(map[string]any) assert.True(t, ok) assert.Equal(t, conditions.InvalidSpecReason, condition["reason"]) assert.Equal(t, "The Rollout \"foo\" is invalid: spec.strategy.canary.pingPong.pongService: Invalid value: \"pong-service\": service \"pong-service\" not found", condition["message"]) @@ -1699,11 +1795,11 @@ func TestResumeRolloutAfterPauseDuration(t *testing.T) { f.run(getKey(r2, t)) patch := f.getPatchedRollout(patchIndex) - var patchObj map[string]interface{} + var patchObj map[string]any err := json.Unmarshal([]byte(patch), &patchObj) assert.NoError(t, err) - status := patchObj["status"].(map[string]interface{}) + status := patchObj["status"].(map[string]any) 
assert.Equal(t, float64(2), status["currentStepIndex"]) controllerPause, ok := status["controllerPause"] assert.True(t, ok) @@ -2008,3 +2104,149 @@ func TestIsDynamicallyRollingBackToStable(t *testing.T) { }) } } + +func TestCanaryReplicaAndSpecChangedTogether(t *testing.T) { + f := newFixture(t) + defer f.Close() + + originReplicas := 3 + r1 := newCanaryRollout("foo", originReplicas, nil, nil, nil, intstr.FromInt(1), intstr.FromInt(0)) + canarySVCName := "canary" + stableSVCName := "stable" + r1.Spec.Strategy.Canary.CanaryService = canarySVCName + r1.Spec.Strategy.Canary.StableService = stableSVCName + + stableRS := newReplicaSetWithStatus(r1, originReplicas, originReplicas) + stableSVC := newService(stableSVCName, 80, + map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: stableRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, r1) + + r2 := bumpVersion(r1) + canaryRS := newReplicaSetWithStatus(r2, originReplicas, originReplicas) + canarySVC := newService(canarySVCName, 80, + map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: canaryRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, r2) + + f.replicaSetLister = append(f.replicaSetLister, canaryRS, stableRS) + f.serviceLister = append(f.serviceLister, canarySVC, stableSVC) + + r3 := bumpVersion(r2) + r3.Spec.Replicas = pointer.Int32(int32(originReplicas) + 5) + r3.Status.StableRS = stableRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + r3.Status.CurrentPodHash = canaryRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + + f.rolloutLister = append(f.rolloutLister, r3) + f.kubeobjects = append(f.kubeobjects, canaryRS, stableRS, canarySVC, stableSVC) + f.objects = append(f.objects, r3) + + ctrl, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := ctrl.newRolloutContext(r3) + assert.NoError(t, err) + err = roCtx.reconcile() + assert.NoError(t, err) + updated, err := f.kubeclient.AppsV1().ReplicaSets(r3.Namespace).Get(context.Background(), canaryRS.Name, metav1.GetOptions{}) + assert.NoError(t, err) + 
// check the canary one is updated + assert.NotEqual(t, originReplicas, int(*updated.Spec.Replicas)) +} + +func TestSyncRolloutWithConflictInScaleReplicaSet(t *testing.T) { + os.Setenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT", "true") + defer os.Unsetenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT") + + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: int32Ptr(10), + }, { + Pause: &v1alpha1.RolloutPause{ + Duration: v1alpha1.DurationFromInt(10), + }, + }, + } + r1 := newCanaryRollout("foo", 10, nil, steps, int32Ptr(1), intstr.FromInt(1), intstr.FromInt(0)) + r1.Spec.Template.Labels["rollout.argoproj.io/foo"] = "bar" + + rs1 := newReplicaSetWithStatus(r1, 10, 10) + r1.Spec.Replicas = pointer.Int32(2) + f.kubeobjects = append(f.kubeobjects, rs1) + f.replicaSetLister = append(f.replicaSetLister, rs1) + + f.rolloutLister = append(f.rolloutLister, r1) + f.objects = append(f.objects, r1) + + f.expectPatchRolloutAction(r1) + f.expectUpdateReplicaSetAction(rs1) // attempt to scale replicaset but conflict + patchIndex := f.expectPatchReplicaSetAction(rs1) // instead of update patch replicaset + + key := fmt.Sprintf("%s/%s", r1.Namespace, r1.Name) + c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute }) + + f.kubeclient.PrependReactor("update", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, action.(k8stesting.UpdateAction).GetObject(), errors.NewConflict(schema.GroupResource{ + Group: "Apps", + Resource: "ReplicaSet", + }, action.(k8stesting.UpdateAction).GetObject().(*appsv1.ReplicaSet).Name, fmt.Errorf("test error")) + }) + + f.runController(key, true, false, c, i, k8sI) + + updatedRs := f.getPatchedReplicaSet(patchIndex) // minus one because update did not happen because conflict + assert.Equal(t, int32(2), *updatedRs.Spec.Replicas) +} + +func TestSyncRolloutWithConflictInSyncReplicaSetRevision(t *testing.T) { + 
os.Setenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT", "true") + defer os.Unsetenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT") + + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: int32Ptr(10), + }, { + Pause: &v1alpha1.RolloutPause{ + Duration: v1alpha1.DurationFromInt(10), + }, + }, + } + r1 := newCanaryRollout("foo", 3, nil, steps, int32Ptr(1), intstr.FromInt(1), intstr.FromInt(0)) + r2 := bumpVersion(r1) + + rs1 := newReplicaSetWithStatus(r1, 3, 3) + rs2 := newReplicaSetWithStatus(r2, 3, 3) + rs2.Annotations["rollout.argoproj.io/revision"] = "1" + + f.kubeobjects = append(f.kubeobjects, rs1, rs2) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + key := fmt.Sprintf("%s/%s", r1.Namespace, r1.Name) + c, i, k8sI := f.newController(func() time.Duration { return 30 * time.Minute }) + + f.kubeclient.PrependReactor("update", "replicasets", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, &appsv1.ReplicaSet{}, errors.NewConflict(schema.GroupResource{ + Group: "Apps", + Resource: "ReplicaSet", + }, action.(k8stesting.UpdateAction).GetObject().(*appsv1.ReplicaSet).Name, fmt.Errorf("test error")) + }) + + f.expectPatchRolloutAction(r2) + f.expectUpdateReplicaSetAction(rs1) // attempt to update replicaset revision but conflict + patchIndex1 := f.expectPatchReplicaSetAction(rs1) // instead of update patch replicaset + + f.expectUpdateReplicaSetAction(rs2) // attempt to scale replicaset but conflict + patchIndex2 := f.expectPatchReplicaSetAction(rs2) // instead of update patch replicaset + + f.runController(key, true, false, c, i, k8sI) + + updatedRs1 := f.getPatchedReplicaSet(patchIndex1) + assert.Equal(t, "2", updatedRs1.Annotations["rollout.argoproj.io/revision"]) + assert.Equal(t, int32(3), *updatedRs1.Spec.Replicas) + + updatedRs2 := f.getPatchedReplicaSet(patchIndex2) + assert.Equal(t, int32(0), 
*updatedRs2.Spec.Replicas) +} diff --git a/rollout/controller.go b/rollout/controller.go index e73115d6c8..a7c9dcc3dc 100644 --- a/rollout/controller.go +++ b/rollout/controller.go @@ -4,11 +4,16 @@ import ( "context" "encoding/json" "fmt" + "os" "reflect" "strconv" + "strings" "sync" "time" + "github.com/argoproj/argo-rollouts/utils/annotations" + + "github.com/argoproj/argo-rollouts/utils/diff" "k8s.io/apimachinery/pkg/runtime/schema" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts" @@ -16,11 +21,13 @@ import ( log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + patchtypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" @@ -146,8 +153,8 @@ type reconcilerBase struct { podRestarter RolloutPodRestarter // used for unit testing - enqueueRollout func(obj interface{}) //nolint:structcheck - enqueueRolloutAfter func(obj interface{}, duration time.Duration) //nolint:structcheck + enqueueRollout func(obj any) //nolint:structcheck + enqueueRolloutAfter func(obj any, duration time.Duration) //nolint:structcheck newTrafficRoutingReconciler func(roCtx *rolloutContext) ([]trafficrouting.TrafficRoutingReconciler, error) //nolint:structcheck // recorder is an event recorder for recording Event resources to the Kubernetes API. 
@@ -215,10 +222,10 @@ func NewController(cfg ControllerConfig) *Controller { ingressWorkqueue: cfg.IngressWorkQueue, metricsServer: cfg.MetricsServer, } - controller.enqueueRollout = func(obj interface{}) { + controller.enqueueRollout = func(obj any) { controllerutil.EnqueueRateLimited(obj, cfg.RolloutWorkQueue) } - controller.enqueueRolloutAfter = func(obj interface{}, duration time.Duration) { + controller.enqueueRolloutAfter = func(obj any, duration time.Duration) { controllerutil.EnqueueAfter(obj, duration, cfg.RolloutWorkQueue) } @@ -235,7 +242,7 @@ func NewController(cfg ControllerConfig) *Controller { log.Info("Setting up event handlers") // Set up an event handler for when rollout resources change cfg.RolloutsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controller.enqueueRollout(obj) ro := unstructuredutil.ObjectToRollout(obj) if ro != nil { @@ -251,7 +258,7 @@ func NewController(cfg ControllerConfig) *Controller { } } }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { oldRollout := unstructuredutil.ObjectToRollout(old) newRollout := unstructuredutil.ObjectToRollout(new) if oldRollout != nil && newRollout != nil { @@ -271,7 +278,7 @@ func NewController(cfg ControllerConfig) *Controller { } controller.enqueueRollout(new) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil { logCtx := logutil.WithRollout(ro) logCtx.Info("rollout enqueue due to delete event") @@ -290,10 +297,10 @@ func NewController(cfg ControllerConfig) *Controller { }) cfg.ReplicaSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controllerutil.EnqueueParentObject(obj, register.RolloutKind, controller.enqueueRollout) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { newRS := 
new.(*appsv1.ReplicaSet) oldRS := old.(*appsv1.ReplicaSet) if newRS.ResourceVersion == oldRS.ResourceVersion { @@ -303,16 +310,16 @@ func NewController(cfg ControllerConfig) *Controller { } controllerutil.EnqueueParentObject(new, register.RolloutKind, controller.enqueueRollout) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controllerutil.EnqueueParentObject(obj, register.RolloutKind, controller.enqueueRollout) }, }) cfg.AnalysisRunInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controllerutil.EnqueueParentObject(obj, register.RolloutKind, controller.enqueueRollout) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { oldAR := unstructuredutil.ObjectToAnalysisRun(old) newAR := unstructuredutil.ObjectToAnalysisRun(new) if oldAR == nil || newAR == nil { @@ -324,7 +331,7 @@ func NewController(cfg ControllerConfig) *Controller { } controllerutil.EnqueueParentObject(new, register.RolloutKind, controller.enqueueRollout) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controllerutil.EnqueueParentObject(obj, register.RolloutKind, controller.enqueueRollout) }, }) @@ -806,38 +813,64 @@ func (c *rolloutContext) getReferencedRolloutAnalyses() (*[]validation.AnalysisT } func (c *rolloutContext) getReferencedAnalysisTemplates(rollout *v1alpha1.Rollout, rolloutAnalysis *v1alpha1.RolloutAnalysis, templateType validation.AnalysisTemplateType, canaryStepIndex int) (*validation.AnalysisTemplatesWithType, error) { - templates := make([]*v1alpha1.AnalysisTemplate, 0) - clusterTemplates := make([]*v1alpha1.ClusterAnalysisTemplate, 0) fldPath := validation.GetAnalysisTemplateWithTypeFieldPath(templateType, canaryStepIndex) - for _, templateRef := range rolloutAnalysis.Templates { + templates, clusterTemplates, err := c.getReferencedAnalysisTemplatesFromRef(&rolloutAnalysis.Templates, fldPath) + + return 
&validation.AnalysisTemplatesWithType{ + AnalysisTemplates: templates, + ClusterAnalysisTemplates: clusterTemplates, + TemplateType: templateType, + CanaryStepIndex: canaryStepIndex, + }, err +} + +func (c *rolloutContext) getReferencedAnalysisTemplatesFromRef(templateRefs *[]v1alpha1.AnalysisTemplateRef, fieldPath *field.Path) ([]*v1alpha1.AnalysisTemplate, []*v1alpha1.ClusterAnalysisTemplate, error) { + templates := make([]*v1alpha1.AnalysisTemplate, 0) + clusterTemplates := make([]*v1alpha1.ClusterAnalysisTemplate, 0) + for _, templateRef := range *templateRefs { if templateRef.ClusterScope { template, err := c.clusterAnalysisTemplateLister.Get(templateRef.TemplateName) if err != nil { if k8serrors.IsNotFound(err) { - return nil, field.Invalid(fldPath, templateRef.TemplateName, fmt.Sprintf("ClusterAnalysisTemplate '%s' not found", templateRef.TemplateName)) + return nil, nil, field.Invalid(fieldPath, templateRef.TemplateName, fmt.Sprintf("ClusterAnalysisTemplate '%s' not found", templateRef.TemplateName)) } - return nil, err + return nil, nil, err } clusterTemplates = append(clusterTemplates, template) + // Look for nested templates + if template.Spec.Templates != nil { + innerFldPath := field.NewPath("spec", "templates") + innerTemplates, innerClusterTemplates, innerErr := c.getReferencedAnalysisTemplatesFromRef(&template.Spec.Templates, innerFldPath) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) 
+ } } else { template, err := c.analysisTemplateLister.AnalysisTemplates(c.rollout.Namespace).Get(templateRef.TemplateName) if err != nil { if k8serrors.IsNotFound(err) { - return nil, field.Invalid(fldPath, templateRef.TemplateName, fmt.Sprintf("AnalysisTemplate '%s' not found", templateRef.TemplateName)) + return nil, nil, field.Invalid(fieldPath, templateRef.TemplateName, fmt.Sprintf("AnalysisTemplate '%s' not found", templateRef.TemplateName)) } - return nil, err + return nil, nil, err } templates = append(templates, template) + // Look for nested templates + if template.Spec.Templates != nil { + innerFldPath := field.NewPath("spec", "templates") + innerTemplates, innerClusterTemplates, innerErr := c.getReferencedAnalysisTemplatesFromRef(&template.Spec.Templates, innerFldPath) + if innerErr != nil { + return nil, nil, innerErr + } + clusterTemplates = append(clusterTemplates, innerClusterTemplates...) + templates = append(templates, innerTemplates...) + } } } - - return &validation.AnalysisTemplatesWithType{ - AnalysisTemplates: templates, - ClusterAnalysisTemplates: clusterTemplates, - TemplateType: templateType, - CanaryStepIndex: canaryStepIndex, - }, nil + uniqueTemplates, uniqueClusterTemplates := analysisutil.FilterUniqueTemplates(templates, clusterTemplates) + return uniqueTemplates, uniqueClusterTemplates, nil } func (c *rolloutContext) getReferencedIngresses() (*[]ingressutil.Ingress, error) { @@ -901,7 +934,7 @@ func (c *rolloutContext) getReferencedALBIngresses(canary *v1alpha1.CanaryStrate return &ingresses, nil } -func handleCacheError(name string, childFields []string, value interface{}, err error) (*[]ingressutil.Ingress, error) { +func handleCacheError(name string, childFields []string, value any, err error) (*[]ingressutil.Ingress, error) { if k8serrors.IsNotFound(err) { fldPath := field.NewPath("spec", "strategy", "canary", "trafficRouting") return nil, field.Invalid(fldPath.Child(name, childFields...), value, err.Error()) @@ -922,3 +955,92 @@ 
func remarshalRollout(r *v1alpha1.Rollout) *v1alpha1.Rollout { } return &remarshalled } + +// updateReplicaSetFallbackToPatch updates the replicaset using Update and on failure falls back to a patch; this function only exists to make sure we always can update +// replicasets and to not get into a conflict loop updating replicasets. We should really look into a complete refactor of how rollouts handles replicasets such +// that we do not keep a full replicaset on the rollout context under newRS and instead switch to a patch only based approach. +func (c *rolloutContext) updateReplicaSetFallbackToPatch(ctx context.Context, rs *appsv1.ReplicaSet) (*appsv1.ReplicaSet, error) { + updatedRS, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Update(ctx, rs, metav1.UpdateOptions{}) + if err != nil { + if errors.IsConflict(err) { + if os.Getenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT") == "true" { + rsGet, err := c.replicaSetLister.ReplicaSets(rs.Namespace).Get(rs.Name) + if err != nil { + return nil, fmt.Errorf("error getting replicaset in updateReplicaSetFallbackToPatch %s: %w", rs.Name, err) + } + rsGetJson, err := json.Marshal(rsGet) + if err != nil { + return nil, fmt.Errorf("error marshalling informer replicaset in updateReplicaSetFallbackToPatch %s: %w", rs.Name, err) + } + rsCopyJson, err := json.Marshal(rs) + if err != nil { + return nil, fmt.Errorf("error marshalling memory replicaset in updateReplicaSetFallbackToPatch %s: %w", rs.Name, err) + } + c.log.Infof("Informer RS: %s", rsGetJson) + c.log.Infof("Memory RS: %s", rsCopyJson) + } + + c.log.Infof("Conflict when updating replicaset %s, falling back to patch", rs.Name) + + patchRS := appsv1.ReplicaSet{} + patchRS.Spec.Replicas = rs.Spec.Replicas + patchRS.Spec.Template.Labels = rs.Spec.Template.Labels + patchRS.Spec.Template.Annotations = rs.Spec.Template.Annotations + + patchRS.Annotations = make(map[string]string) + patchRS.Labels = make(map[string]string) + patchRS.Spec.Selector = &metav1.LabelSelector{ + 
MatchLabels: make(map[string]string), + } + + if _, found := rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]; found { + patchRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] = rs.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + } + + if _, found := rs.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey]; found { + patchRS.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = rs.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] + } + + if _, found := rs.Spec.Selector.MatchLabels[v1alpha1.DefaultRolloutUniqueLabelKey]; found { + patchRS.Spec.Selector.MatchLabels[v1alpha1.DefaultRolloutUniqueLabelKey] = rs.Spec.Selector.MatchLabels[v1alpha1.DefaultRolloutUniqueLabelKey] + } + + for key, value := range rs.Annotations { + if strings.HasPrefix(key, annotations.RolloutLabel) || + strings.HasPrefix(key, "argo-rollouts.argoproj.io") || + strings.HasPrefix(key, "experiment.argoproj.io") { + patchRS.Annotations[key] = value + } + } + for key, value := range rs.Labels { + if strings.HasPrefix(key, annotations.RolloutLabel) || + strings.HasPrefix(key, "argo-rollouts.argoproj.io") || + strings.HasPrefix(key, "experiment.argoproj.io") { + patchRS.Labels[key] = value + } + } + + patch, _, err := diff.CreateTwoWayMergePatch(appsv1.ReplicaSet{}, patchRS, appsv1.ReplicaSet{}) + if err != nil { + return nil, fmt.Errorf("error creating patch for conflict log in updateReplicaSetFallbackToPatch %s: %w", rs.Name, err) + } + + c.log.Infof("Patching replicaset with patch: %s", string(patch)) + updatedRS, err = c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + return nil, fmt.Errorf("error patching replicaset in updateReplicaSetFallbackToPatch %s: %w", rs.Name, err) + } + + err = c.replicaSetInformer.GetIndexer().Update(updatedRS) + if err != nil { + return nil, fmt.Errorf("error updating replicaset informer in 
updateReplicaSetFallbackToPatch %s: %w", rs.Name, err) + } + + return updatedRS, err + } + } + if updatedRS != nil { + updatedRS.DeepCopyInto(rs) + } + return rs, err +} diff --git a/rollout/controller_test.go b/rollout/controller_test.go index a893353053..e38fca79c5 100644 --- a/rollout/controller_test.go +++ b/rollout/controller_test.go @@ -99,10 +99,12 @@ type fixture struct { kubeactions []core.Action actions []core.Action // Objects from here preloaded into NewSimpleFake. - kubeobjects []runtime.Object - objects []runtime.Object - enqueuedObjects map[string]int - unfreezeTime func() error + kubeobjects []runtime.Object + objects []runtime.Object + // Acquire 'enqueuedObjectsLock' before accessing enqueuedObjects + enqueuedObjects map[string]int + enqueuedObjectsLock sync.Mutex + unfreezeTime func() error // events holds all the K8s Event Reasons emitted during the run events []string @@ -116,9 +118,12 @@ func newFixture(t *testing.T) *fixture { f.kubeobjects = []runtime.Object{} f.enqueuedObjects = make(map[string]int) now := time.Now() - timeutil.Now = func() time.Time { return now } + + timeutil.SetNowTimeFunc(func() time.Time { + return now + }) f.unfreezeTime = func() error { - timeutil.Now = time.Now + timeutil.SetNowTimeFunc(time.Now) return nil } @@ -494,12 +499,12 @@ func calculatePatch(ro *v1alpha1.Rollout, patch string) string { json.Unmarshal(newBytes, newRO) newObservedGen := strconv.Itoa(int(newRO.Generation)) - newPatch := make(map[string]interface{}) + newPatch := make(map[string]any) err = json.Unmarshal([]byte(patch), &newPatch) if err != nil { panic(err) } - newStatus := newPatch["status"].(map[string]interface{}) + newStatus := newPatch["status"].(map[string]any) newStatus["observedGeneration"] = newObservedGen newPatch["status"] = newStatus newPatchBytes, _ := json.Marshal(newPatch) @@ -507,7 +512,7 @@ func calculatePatch(ro *v1alpha1.Rollout, patch string) string { } func cleanPatch(expectedPatch string) string { - patch := 
make(map[string]interface{}) + patch := make(map[string]any) err := json.Unmarshal([]byte(expectedPatch), &patch) if err != nil { panic(err) @@ -598,15 +603,15 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share RefResolver: &FakeWorkloadRefResolver{}, }) - var enqueuedObjectsLock sync.Mutex - c.enqueueRollout = func(obj interface{}) { + c.enqueueRollout = func(obj any) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { panic(err) } - enqueuedObjectsLock.Lock() - defer enqueuedObjectsLock.Unlock() + + f.enqueuedObjectsLock.Lock() + defer f.enqueuedObjectsLock.Unlock() count, ok := f.enqueuedObjects[key] if !ok { count = 0 @@ -615,7 +620,7 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share f.enqueuedObjects[key] = count c.rolloutWorkqueue.Add(obj) } - c.enqueueRolloutAfter = func(obj interface{}, duration time.Duration) { + c.enqueueRolloutAfter = func(obj any, duration time.Duration) { c.enqueueRollout(obj) } c.newTrafficRoutingReconciler = func(roCtx *rolloutContext) ([]trafficrouting.TrafficRoutingReconciler, error) { @@ -720,7 +725,8 @@ func (f *fixture) runController(rolloutName string, startInformers bool, expectE f.t.Errorf("%d expected actions did not happen:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):]) } fakeRecorder := c.recorder.(*record.FakeEventRecorder) - f.events = fakeRecorder.Events + + f.events = fakeRecorder.Events() return c } @@ -786,6 +792,12 @@ func (f *fixture) expectPatchServiceAction(s *corev1.Service, newLabel string) i return len } +func (f *fixture) expectGetReplicaSetAction(r *appsv1.ReplicaSet) int { //nolint:unused + len := len(f.kubeactions) + f.kubeactions = append(f.kubeactions, core.NewGetAction(schema.GroupVersionResource{Resource: "replicasets"}, r.Namespace, r.Name)) + return len +} + func (f *fixture) expectCreateReplicaSetAction(r *appsv1.ReplicaSet) int { len := len(f.kubeactions) 
f.kubeactions = append(f.kubeactions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, r.Namespace, r)) @@ -944,6 +956,21 @@ func (f *fixture) getUpdatedReplicaSet(index int) *appsv1.ReplicaSet { return rs } +func (f *fixture) getPatchedReplicaSet(index int) *appsv1.ReplicaSet { + action := filterInformerActions(f.kubeclient.Actions())[index] + patchAction, ok := action.(core.PatchAction) + if !ok { + f.t.Fatalf("Expected Patch action, not %s", action.GetVerb()) + } + + rs := appsv1.ReplicaSet{} + err := json.Unmarshal(patchAction.GetPatch(), &rs) + if err != nil { + panic(err) + } + return &rs +} + func (f *fixture) verifyPatchedReplicaSet(index int, scaleDownDelaySeconds int32) { action := filterInformerActions(f.kubeclient.Actions())[index] patchAction, ok := action.(core.PatchAction) @@ -1078,7 +1105,7 @@ func (f *fixture) getPatchedRolloutWithoutConditions(index int) string { if !ok { f.t.Fatalf("Expected Patch action, not %s", action.GetVerb()) } - ro := make(map[string]interface{}) + ro := make(map[string]any) err := json.Unmarshal(patchAction.GetPatch(), &ro) if err != nil { f.t.Fatalf("Unable to unmarshal Patch") @@ -1579,7 +1606,7 @@ func TestGetReferencedAnalyses(t *testing.T) { defer f.Close() rolloutAnalysisFail := v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "does-not-exist", ClusterScope: false, }}, @@ -1638,12 +1665,12 @@ func TestGetReferencedAnalyses(t *testing.T) { }) } -func TestGetReferencedAnalysisTemplate(t *testing.T) { +func TestGetReferencedClusterAnalysisTemplate(t *testing.T) { f := newFixture(t) defer f.Close() r := newBlueGreenRollout("rollout", 1, nil, "active-service", "preview-service") roAnalysisTemplate := &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{{ + Templates: []v1alpha1.AnalysisTemplateRef{{ TemplateName: "cluster-analysis-template-name", ClusterScope: true, }}, @@ -1659,7 
+1686,53 @@ func TestGetReferencedAnalysisTemplate(t *testing.T) { }) t.Run("get referenced analysisTemplate - success", func(t *testing.T) { - f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplate("cluster-analysis-template-name")) + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplate("cluster-analysis-template-name", "cluster-example")) + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedAnalysisTemplates(r, roAnalysisTemplate, validation.PrePromotionAnalysis, 0) + assert.NoError(t, err) + }) +} + +func TestGetInnerReferencedAnalysisTemplate(t *testing.T) { + f := newFixture(t) + defer f.Close() + r := newBlueGreenRollout("rollout", 1, nil, "active-service", "preview-service") + roAnalysisTemplate := &v1alpha1.RolloutAnalysis{ + Templates: []v1alpha1.AnalysisTemplateRef{{ + TemplateName: "first-cluster-analysis-template-name", + ClusterScope: true, + }}, + } + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplateWithAnalysisRefs("first-cluster-analysis-template-name", "second-cluster-analysis-template-name", "third-cluster-analysis-template-name")) + + t.Run("get inner referenced analysisTemplate - fail", func(t *testing.T) { + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedAnalysisTemplates(r, roAnalysisTemplate, validation.PrePromotionAnalysis, 0) + expectedErr := field.Invalid(field.NewPath("spec", "templates"), "second-cluster-analysis-template-name", "ClusterAnalysisTemplate 'second-cluster-analysis-template-name' not found") + assert.Error(t, err) + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("get inner referenced analysisTemplate second level - fail", func(t *testing.T) { + f.clusterAnalysisTemplateLister = 
append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplate("second-cluster-analysis-template-name", "cluster-example")) + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplateWithAnalysisRefs("third-cluster-analysis-template-name", "fourth-cluster-analysis-template-name")) + c, _, _ := f.newController(noResyncPeriodFunc) + roCtx, err := c.newRolloutContext(r) + assert.NoError(t, err) + _, err = roCtx.getReferencedAnalysisTemplates(r, roAnalysisTemplate, validation.PrePromotionAnalysis, 0) + expectedErr := field.Invalid(field.NewPath("spec", "templates"), "fourth-cluster-analysis-template-name", "ClusterAnalysisTemplate 'fourth-cluster-analysis-template-name' not found") + assert.Error(t, err) + assert.Equal(t, expectedErr.Error(), err.Error()) + }) + + t.Run("get inner referenced analysisTemplate - success", func(t *testing.T) { + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplate("second-cluster-analysis-template-name", "cluster-example")) + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplateWithAnalysisRefs("third-cluster-analysis-template-name", "fourth-cluster-analysis-template-name")) + f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplate("fourth-cluster-analysis-template-name", "cluster-example")) c, _, _ := f.newController(noResyncPeriodFunc) roCtx, err := c.newRolloutContext(r) assert.NoError(t, err) diff --git a/rollout/ephemeralmetadata.go b/rollout/ephemeralmetadata.go index 92fc5d2670..4f502a78ab 100644 --- a/rollout/ephemeralmetadata.go +++ b/rollout/ephemeralmetadata.go @@ -2,6 +2,7 @@ package rollout import ( "context" + "fmt" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -81,10 +82,12 @@ func (c *rolloutContext) syncEphemeralMetadata(ctx context.Context, rs *appsv1.R } // 2. 
Update ReplicaSet so that any new pods it creates will have the metadata - _, err = c.kubeclientset.AppsV1().ReplicaSets(modifiedRS.Namespace).Update(ctx, modifiedRS, metav1.UpdateOptions{}) + rs, err = c.updateReplicaSetFallbackToPatch(ctx, modifiedRS) if err != nil { - return err + c.log.Infof("failed to sync ephemeral metadata %v to ReplicaSet %s: %v", podMetadata, rs.Name, err) + return fmt.Errorf("failed to sync ephemeral metadata: %w", err) } + c.log.Infof("synced ephemeral metadata %v to ReplicaSet %s", podMetadata, rs.Name) return nil } diff --git a/rollout/ephemeralmetadata_test.go b/rollout/ephemeralmetadata_test.go index 24f76d6bac..59daf1be0e 100644 --- a/rollout/ephemeralmetadata_test.go +++ b/rollout/ephemeralmetadata_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -239,3 +240,36 @@ func TestSyncBlueGreenEphemeralMetadataSecondRevision(t *testing.T) { updatedPod := f.getUpdatedPod(podIdx) assert.Equal(t, expectedStableLabels, updatedPod.Labels) } + +func TestReconcileEphemeralMetadata(t *testing.T) { + newRS := &v1.ReplicaSet{} + stableRS := &v1.ReplicaSet{} + + mockContext := &rolloutContext{ + rollout: &v1alpha1.Rollout{ + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + CanaryMetadata: &v1alpha1.PodTemplateMetadata{}, + StableMetadata: &v1alpha1.PodTemplateMetadata{}, + }, + }, + }, + Status: v1alpha1.RolloutStatus{ + StableRS: "some-stable-rs-hash", + }, + }, + newRS: newRS, + stableRS: stableRS, + otherRSs: []*v1.ReplicaSet{new(v1.ReplicaSet), new(v1.ReplicaSet)}, + } + + // Scenario 1: upgrading state when the new ReplicaSet is a canary + err := mockContext.reconcileEphemeralMetadata() + assert.NoError(t, err) + + // Scenario 2: Sync stable metadata to the stable ReplicaSet + mockContext.rollout.Status.StableRS = "" // 
Set stable ReplicaSet to empty to simulate an upgrading state + err = mockContext.reconcileEphemeralMetadata() + assert.NoError(t, err) +} diff --git a/rollout/experiment.go b/rollout/experiment.go index 9187c73dcc..7b28cc280b 100644 --- a/rollout/experiment.go +++ b/rollout/experiment.go @@ -50,6 +50,8 @@ func GetExperimentFromTemplate(r *v1alpha1.Rollout, stableRS, newRS *appsv1.Repl Spec: v1alpha1.ExperimentSpec{ Duration: step.Duration, ProgressDeadlineSeconds: r.Spec.ProgressDeadlineSeconds, + DryRun: step.DryRun, + AnalysisRunMetadata: step.AnalysisRunMetadata, }, } diff --git a/rollout/experiment_test.go b/rollout/experiment_test.go index 233dd16ca5..6dabeb47f7 100644 --- a/rollout/experiment_test.go +++ b/rollout/experiment_test.go @@ -76,7 +76,7 @@ func TestRolloutCreateClusterTemplateExperiment(t *testing.T) { f := newFixture(t) defer f.Close() - cat := clusterAnalysisTemplate("bar") + cat := clusterAnalysisTemplate("bar", "cluster-example") steps := []v1alpha1.CanaryStep{{ Experiment: &v1alpha1.RolloutExperimentStep{ Templates: []v1alpha1.RolloutExperimentTemplate{{ @@ -907,3 +907,77 @@ func TestRolloutCreateWeightlessExperimentWithService(t *testing.T) { assert.Equal(t, "canary-weightless-template", ex.Spec.Templates[1].Name) assert.Nil(t, ex.Spec.Templates[1].Service) } + +// The Dry run and metadata should be forwarded from the rollout spec to the experiment spec +func TestRolloutCreateExperimentWithDryRunAndMetadata(t *testing.T) { + f := newFixture(t) + defer f.Close() + + at := analysisTemplate("bar") + steps := []v1alpha1.CanaryStep{{ + Experiment: &v1alpha1.RolloutExperimentStep{ + Templates: []v1alpha1.RolloutExperimentTemplate{{ + Name: "stable-template", + SpecRef: v1alpha1.StableSpecRef, + Replicas: pointer.Int32(1), + }}, + Analyses: []v1alpha1.RolloutExperimentStepAnalysisTemplateRef{{ + Name: "test", + TemplateName: at.Name, + }}, + AnalysisRunMetadata: v1alpha1.AnalysisRunMetadata{ + Labels: map[string]string{ + "foo": "bar", + "foo2": 
"bar2", + }, + Annotations: map[string]string{ + "bar": "foo", + "bar2": "foo2", + }, + }, + DryRun: []v1alpha1.DryRun{ + { + MetricName: "someMetric", + }, + { + MetricName: "someOtherMetric", + }, + }, + }, + }} + + r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1)) + r2 := bumpVersion(r1) + + rs1 := newReplicaSetWithStatus(r1, 1, 1) + rs2 := newReplicaSetWithStatus(r2, 0, 0) + f.kubeobjects = append(f.kubeobjects, rs1, rs2) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + + ex, _ := GetExperimentFromTemplate(r2, rs1, rs2) + r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 1, 0, 1, false) + + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + createExIndex := f.expectCreateExperimentAction(ex) + f.expectPatchRolloutAction(r1) + + f.run(getKey(r2, t)) + createdEx := f.getCreatedExperiment(createExIndex) + assert.Equal(t, createdEx.Name, ex.Name) + assert.Equal(t, createdEx.Spec.Analyses[0].TemplateName, at.Name) + assert.Equal(t, createdEx.Spec.Analyses[0].Name, "test") + + assert.Len(t, createdEx.Spec.AnalysisRunMetadata.Labels, 2) + assert.Equal(t, createdEx.Spec.AnalysisRunMetadata.Labels["foo"], "bar") + assert.Equal(t, createdEx.Spec.AnalysisRunMetadata.Labels["foo2"], "bar2") + assert.Len(t, createdEx.Spec.AnalysisRunMetadata.Annotations, 2) + assert.Equal(t, createdEx.Spec.AnalysisRunMetadata.Annotations["bar"], "foo") + assert.Equal(t, createdEx.Spec.AnalysisRunMetadata.Annotations["bar2"], "foo2") + + assert.Len(t, createdEx.Spec.DryRun, 2) + assert.Equal(t, createdEx.Spec.DryRun[0].MetricName, "someMetric") + assert.Equal(t, createdEx.Spec.DryRun[1].MetricName, "someOtherMetric") +} diff --git a/rollout/replicaset.go b/rollout/replicaset.go index 5eac105807..80f85f6f00 100644 --- a/rollout/replicaset.go +++ b/rollout/replicaset.go @@ -36,7 +36,7 @@ func (c *rolloutContext) 
removeScaleDownDelay(rs *appsv1.ReplicaSet) error { return nil } patch := fmt.Sprintf(removeScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey) - _, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{}) + rs, err := c.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{}) if err != nil { return fmt.Errorf("error removing scale-down-deadline annotation from RS '%s': %w", rs.Name, err) } @@ -176,6 +176,22 @@ func (c *rolloutContext) reconcileNewReplicaSet() (bool, error) { } scaled, _, err := c.scaleReplicaSetAndRecordEvent(c.newRS, newReplicasCount) + + if err != nil { + return scaled, fmt.Errorf("failed to scaleReplicaSetAndRecordEvent in reconcileNewReplicaSet: %w", err) + } + + revision, _ := replicasetutil.Revision(c.newRS) + + if revision == 1 && c.rollout.Spec.WorkloadRef != nil && c.rollout.Spec.WorkloadRef.ScaleDown == v1alpha1.ScaleDownProgressively { + oldScale := defaults.GetReplicasOrDefault(c.newRS.Spec.Replicas) + // scale down the deployment when the rollout has ready replicas or scale up the deployment if rollout fails + if c.rollout.Spec.Replicas != nil && (c.rollout.Status.ReadyReplicas > 0 || oldScale > newReplicasCount) { + targetScale := *c.rollout.Spec.Replicas - c.rollout.Status.ReadyReplicas + err = c.scaleDeployment(&targetScale) + } + } + return scaled, err } @@ -267,7 +283,7 @@ func (c *rolloutContext) cleanupUnhealthyReplicas(oldRSs []*appsv1.ReplicaSet) ( } _, updatedOldRS, err := c.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount) if err != nil { - return nil, totalScaledDown, err + return nil, totalScaledDown, fmt.Errorf("failed to scaleReplicaSetAndRecordEvent in cleanupUnhealthyReplicas: %w", err) } totalScaledDown += scaledDownCount oldRSs[i] = updatedOldRS diff --git a/rollout/replicaset_test.go b/rollout/replicaset_test.go 
index d93fa7c546..2e61606c37 100644 --- a/rollout/replicaset_test.go +++ b/rollout/replicaset_test.go @@ -1,6 +1,7 @@ package rollout import ( + "context" "fmt" "strconv" "testing" @@ -574,3 +575,70 @@ func TestIsReplicaSetReferenced(t *testing.T) { }) } } + +func TestScaleDownProgressively(t *testing.T) { + + tests := []struct { + name string + deploymentReplicas int32 + newRSReplicas int + newRSRevision string + rolloutReplicas int32 + rolloutReadyReplicas int32 + abortScaleDownDelaySeconds int32 + expectedDeploymentReplicas int32 + }{ + { + name: "Scale down deployment", + deploymentReplicas: 5, + newRSReplicas: 5, + newRSRevision: "1", + rolloutReplicas: 5, + rolloutReadyReplicas: 3, + abortScaleDownDelaySeconds: 0, + expectedDeploymentReplicas: 2, + }, + { + name: "Scale up deployment", + deploymentReplicas: 0, + newRSReplicas: 5, + newRSRevision: "1", + rolloutReplicas: 5, + rolloutReadyReplicas: 1, + abortScaleDownDelaySeconds: 0, + expectedDeploymentReplicas: 4, + }, + { + name: "Do not scale deployment", + deploymentReplicas: 5, + newRSReplicas: 5, + newRSRevision: "2", + rolloutReplicas: 5, + rolloutReadyReplicas: 3, + abortScaleDownDelaySeconds: 0, + expectedDeploymentReplicas: 5, + }, + } + + for _, test := range tests { + ctx := createScaleDownRolloutContext(v1alpha1.ScaleDownProgressively, test.deploymentReplicas, true, nil) + ctx.rollout.Spec.Strategy = v1alpha1.RolloutStrategy{ + BlueGreen: &v1alpha1.BlueGreenStrategy{ + AbortScaleDownDelaySeconds: &test.abortScaleDownDelaySeconds, + }, + } + ctx.newRS = rs("foo-v2", test.newRSReplicas, nil, noTimestamp, nil) + ctx.newRS.ObjectMeta.Annotations[annotations.RevisionAnnotation] = test.newRSRevision + ctx.pauseContext.removeAbort = true + ctx.rollout.Spec.Replicas = &test.rolloutReplicas + ctx.rollout.Status.ReadyReplicas = test.rolloutReadyReplicas + + _, err := ctx.reconcileNewReplicaSet() + assert.Nil(t, err) + k8sfakeClient := ctx.kubeclientset.(*k8sfake.Clientset) + updatedDeployment, err := 
k8sfakeClient.AppsV1().Deployments("default").Get(context.TODO(), "workload-test", metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, test.expectedDeploymentReplicas, *updatedDeployment.Spec.Replicas) + + } +} diff --git a/rollout/restart.go b/rollout/restart.go index a8361bf24f..95f9a97076 100644 --- a/rollout/restart.go +++ b/rollout/restart.go @@ -36,7 +36,7 @@ const ( type RolloutPodRestarter struct { client kubernetes.Interface resyncPeriod time.Duration - enqueueAfter func(obj interface{}, duration time.Duration) + enqueueAfter func(obj any, duration time.Duration) } // checkEnqueueRollout enqueues a Rollout if the Rollout's restartedAt is within the next resync diff --git a/rollout/restart_test.go b/rollout/restart_test.go index 84374a37a4..a6a20b4188 100644 --- a/rollout/restart_test.go +++ b/rollout/restart_test.go @@ -94,7 +94,7 @@ func TestRestartCheckEnqueueRollout(t *testing.T) { log: logrus.WithField("", ""), } p := RolloutPodRestarter{ - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { assert.Fail(t, "Should not enqueue rollout") }, } @@ -108,7 +108,7 @@ func TestRestartCheckEnqueueRollout(t *testing.T) { } p := RolloutPodRestarter{ resyncPeriod: 10 * time.Minute, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { assert.Fail(t, "Should not enqueue rollout") }, } @@ -123,7 +123,7 @@ func TestRestartCheckEnqueueRollout(t *testing.T) { } p := RolloutPodRestarter{ resyncPeriod: 10 * time.Minute, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -139,7 +139,7 @@ func TestRestartCheckEnqueueRollout(t *testing.T) { } p := RolloutPodRestarter{ resyncPeriod: 2 * time.Minute, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ 
-182,7 +182,7 @@ func TestRestartReconcile(t *testing.T) { r := RolloutPodRestarter{ client: client, resyncPeriod: 2 * time.Minute, - enqueueAfter: func(obj interface{}, duration time.Duration) {}, + enqueueAfter: func(obj any, duration time.Duration) {}, } err := r.Reconcile(roCtx) assert.Nil(t, err) @@ -203,7 +203,7 @@ func TestRestartReconcile(t *testing.T) { }) r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) {}, + enqueueAfter: func(obj any, duration time.Duration) {}, } err := r.Reconcile(roCtx) assert.Errorf(t, err, expectedErrMsg) @@ -217,7 +217,7 @@ func TestRestartReconcile(t *testing.T) { } r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) {}, + enqueueAfter: func(obj any, duration time.Duration) {}, } err := r.Reconcile(roCtx) assert.Nil(t, err) @@ -235,7 +235,7 @@ func TestRestartReconcile(t *testing.T) { } r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) {}, + enqueueAfter: func(obj any, duration time.Duration) {}, } err := r.Reconcile(roCtx) assert.Nil(t, err) @@ -252,7 +252,7 @@ func TestRestartReconcile(t *testing.T) { } r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) {}, + enqueueAfter: func(obj any, duration time.Duration) {}, } err := r.Reconcile(roCtx) assert.Nil(t, err) @@ -469,7 +469,7 @@ func TestRestartMaxUnavailable(t *testing.T) { enqueued := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -497,7 +497,7 @@ func TestRestartMaxUnavailable(t *testing.T) { enqueued := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -525,7 +525,7 @@ func 
TestRestartMaxUnavailable(t *testing.T) { enqueued := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -558,7 +558,7 @@ func TestRestartMaxUnavailable(t *testing.T) { enqueued := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -592,7 +592,7 @@ func TestRestartMaxUnavailable(t *testing.T) { enqueued := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -618,7 +618,7 @@ func TestRestartMaxUnavailable(t *testing.T) { enqueued := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueued = true }, } @@ -650,7 +650,7 @@ func TestRestartRespectPodDisruptionBudget(t *testing.T) { enqueueCalled := false r := RolloutPodRestarter{ client: client, - enqueueAfter: func(obj interface{}, duration time.Duration) { + enqueueAfter: func(obj any, duration time.Duration) { enqueueCalled = true }, } diff --git a/rollout/scale_utils.go b/rollout/scale_utils.go new file mode 100644 index 0000000000..6759d1a56e --- /dev/null +++ b/rollout/scale_utils.go @@ -0,0 +1,36 @@ +package rollout + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (c *rolloutContext) scaleDeployment(targetScale *int32) error { + deploymentName := c.rollout.Spec.WorkloadRef.Name + namespace := c.rollout.Namespace + deployment, err := c.kubeclientset.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + if err != nil { + c.log.Warnf("Failed to fetch deployment %s: %s", deploymentName, err.Error()) + return err + 
} + + var newReplicasCount int32 + if *targetScale < 0 { + newReplicasCount = 0 + } else { + newReplicasCount = *targetScale + } + if newReplicasCount == *deployment.Spec.Replicas { + return nil + } + c.log.Infof("Scaling deployment %s to %d replicas", deploymentName, newReplicasCount) + *deployment.Spec.Replicas = newReplicasCount + + _, err = c.kubeclientset.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) + if err != nil { + c.log.Warnf("Failed to update deployment %s: %s", deploymentName, err.Error()) + return err + } + return nil +} diff --git a/rollout/scale_utils_test.go b/rollout/scale_utils_test.go new file mode 100644 index 0000000000..0127cfab69 --- /dev/null +++ b/rollout/scale_utils_test.go @@ -0,0 +1,182 @@ +package rollout + +import ( + "context" + "fmt" + "testing" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" + logutil "github.com/argoproj/argo-rollouts/utils/log" + "github.com/argoproj/argo-rollouts/utils/record" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + k8sfake "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" +) + +type DeploymentActions interface { + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.Deployment, error) + Update(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error) +} + +type KubeClientInterface interface { + Deployments(namespace string) DeploymentActions + AppsV1() AppV1Interface +} + +type AppV1Interface interface { + Deployments(namespace string) DeploymentActions +} + +type mockDeploymentInterface struct { + deployment *appsv1.Deployment +} + +func (m *mockDeploymentInterface) Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.Deployment, error) { + return 
m.deployment, nil +} + +func (m *mockDeploymentInterface) Update(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error) { + m.deployment = deployment + return deployment, nil +} + +type testKubeClient struct { + mockDeployment DeploymentActions +} + +func (t *testKubeClient) AppsV1() AppV1Interface { + return t +} + +func (t *testKubeClient) Deployments(namespace string) DeploymentActions { + return t.mockDeployment +} + +type testRolloutContext struct { + *rolloutContext + kubeClient KubeClientInterface +} + +func createScaleDownRolloutContext(scaleDownMode string, deploymentReplicas int32, deploymentExists bool, updateError error) *testRolloutContext { + ro := &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout-test", + Namespace: "default", + Annotations: map[string]string{ + "rollout.argoproj.io/revision": "1", + }, + }, + Spec: v1alpha1.RolloutSpec{ + WorkloadRef: &v1alpha1.ObjectRef{ + Name: "workload-test", + ScaleDown: scaleDownMode, + }, + }, + Status: v1alpha1.RolloutStatus{ + Phase: v1alpha1.RolloutPhaseHealthy, + }, + } + + fakeDeployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "workload-test", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &deploymentReplicas, + }, + } + + var k8sfakeClient *k8sfake.Clientset + if deploymentExists { + k8sfakeClient = k8sfake.NewSimpleClientset(fakeDeployment) + } else { + k8sfakeClient = k8sfake.NewSimpleClientset() + } + + if updateError != nil { + k8sfakeClient.PrependReactor("update", "deployments", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, updateError + }) + } + + mockDeploy := &mockDeploymentInterface{deployment: fakeDeployment} + testClient := &testKubeClient{mockDeployment: mockDeploy} + + ctx := &testRolloutContext{ + rolloutContext: &rolloutContext{ + rollout: ro, + pauseContext: &pauseContext{}, + reconcilerBase: reconcilerBase{ + 
argoprojclientset: &fake.Clientset{}, + kubeclientset: k8sfakeClient, + recorder: record.NewFakeEventRecorder(), + }, + }, + kubeClient: testClient, + } + ctx.log = logutil.WithRollout(ctx.rollout) + + return ctx +} + +func TestScaleDeployment(t *testing.T) { + tests := []struct { + name string + scaleToZero bool + targetScale *int32 + expectedCount int32 + deploymentExists bool + updateError error + }{ + { + name: "Scale down to zero", + targetScale: int32Ptr(0), + expectedCount: 0, + deploymentExists: true, + }, + { + name: "Scale down to a negative value", + targetScale: int32Ptr(-1), + expectedCount: 0, + deploymentExists: true, + }, + { + name: "Deployment is already scaled", + targetScale: int32Ptr(5), + expectedCount: 5, + deploymentExists: true, + }, + { + name: "Error fetching deployment", + targetScale: int32Ptr(0), + deploymentExists: false, + }, + { + name: "Error updating deployment", + scaleToZero: false, + targetScale: int32Ptr(0), + deploymentExists: true, + updateError: fmt.Errorf("fake update error"), + }, + } + + for _, test := range tests { + ctx := createScaleDownRolloutContext(v1alpha1.ScaleDownOnSuccess, 5, test.deploymentExists, test.updateError) + err := ctx.scaleDeployment(test.targetScale) + + if !test.deploymentExists || test.updateError != nil { + assert.NotNil(t, err) + continue + } + assert.Nil(t, err) + k8sfakeClient := ctx.kubeclientset.(*k8sfake.Clientset) + updatedDeployment, err := k8sfakeClient.AppsV1().Deployments("default").Get(context.TODO(), "workload-test", metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, *updatedDeployment.Spec.Replicas, test.expectedCount) + } +} diff --git a/rollout/service.go b/rollout/service.go index 097f4160bf..bd4a3b8d1e 100644 --- a/rollout/service.go +++ b/rollout/service.go @@ -245,7 +245,7 @@ func (c *rolloutContext) getPreviewAndActiveServices() (*corev1.Service, *corev1 func (c *rolloutContext) reconcilePingAndPongService() error { if trafficrouting.IsPingPongEnabled(c.rollout) && 
!rolloututils.IsFullyPromoted(c.rollout) { - _, canaryService := trafficrouting.GetStableAndCanaryServices(c.rollout) + _, canaryService := trafficrouting.GetStableAndCanaryServices(c.rollout, true) return c.ensureSVCTargets(canaryService, c.newRS, false) } return nil diff --git a/rollout/service_test.go b/rollout/service_test.go index cb15367a3a..0466634bde 100644 --- a/rollout/service_test.go +++ b/rollout/service_test.go @@ -158,7 +158,6 @@ func TestPreviewServiceNotFound(t *testing.T) { activeSvc := newService("active-svc", 80, nil, nil) notUsedPreviewSvc := newService("preview-svc", 80, nil, nil) f.kubeobjects = append(f.kubeobjects, activeSvc) - f.serviceLister = append(f.serviceLister) patchIndex := f.expectPatchRolloutAction(r) f.run(getKey(r, t)) diff --git a/rollout/sync.go b/rollout/sync.go index 5d07958dc1..15559fa785 100644 --- a/rollout/sync.go +++ b/rollout/sync.go @@ -83,17 +83,13 @@ func (c *rolloutContext) syncReplicaSetRevision() (*appsv1.ReplicaSet, error) { affinityNeedsUpdate := replicasetutil.IfInjectedAntiAffinityRuleNeedsUpdate(rsCopy.Spec.Template.Spec.Affinity, *c.rollout) if annotationsUpdated || minReadySecondsNeedsUpdate || affinityNeedsUpdate { + rsCopy.Spec.MinReadySeconds = c.rollout.Spec.MinReadySeconds rsCopy.Spec.Template.Spec.Affinity = replicasetutil.GenerateReplicaSetAffinity(*c.rollout) - rs, err := c.kubeclientset.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{}) - if err != nil { - c.log.WithError(err).Error("Error: updating replicaset revision") - return nil, fmt.Errorf("error updating replicaset revision: %v", err) - } - c.log.Infof("Synced revision on ReplicaSet '%s' to '%s'", rs.Name, newRevision) - err = c.replicaSetInformer.GetIndexer().Update(rs) + + rs, err := c.updateReplicaSetFallbackToPatch(ctx, rsCopy) if err != nil { - return nil, fmt.Errorf("error updating replicaset informer in syncReplicaSetRevision: %w", err) + return nil, fmt.Errorf("failed to update replicaset 
revision on %s: %w", rsCopy.Name, err) } return rs, nil } @@ -113,7 +109,7 @@ func (c *rolloutContext) syncReplicaSetRevision() (*appsv1.ReplicaSet, error) { conditions.SetRolloutCondition(&c.rollout.Status, *condition) updatedRollout, err := c.argoprojclientset.ArgoprojV1alpha1().Rollouts(c.rollout.Namespace).UpdateStatus(ctx, c.rollout, metav1.UpdateOptions{}) if err != nil { - c.log.WithError(err).Error("Error: updating rollout revision") + c.log.WithError(err).Error("Error: updating rollout status in syncReplicaSetRevision") return nil, err } c.rollout = updatedRollout @@ -245,7 +241,7 @@ func (c *rolloutContext) createDesiredReplicaSet() (*appsv1.ReplicaSet, error) { cond := conditions.NewRolloutCondition(v1alpha1.RolloutProgressing, corev1.ConditionFalse, conditions.FailedRSCreateReason, msg) patchErr := c.patchCondition(c.rollout, newStatus, cond) if patchErr != nil { - c.log.Warnf("Error Patching Rollout: %s", patchErr.Error()) + c.log.Warnf("Error Patching Rollout Conditions: %s", patchErr.Error()) } return nil, err default: @@ -280,9 +276,10 @@ func (c *rolloutContext) createDesiredReplicaSet() (*appsv1.ReplicaSet, error) { // syncReplicasOnly is responsible for reconciling rollouts on scaling events. 
func (c *rolloutContext) syncReplicasOnly() error { c.log.Infof("Syncing replicas only due to scaling event") - _, err := c.getAllReplicaSetsAndSyncRevision(false) + var err error + c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false) if err != nil { - return err + return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in syncReplicasOnly: %w", err) } newStatus := c.rollout.Status.DeepCopy() @@ -295,7 +292,7 @@ func (c *rolloutContext) syncReplicasOnly() error { if err := c.reconcileBlueGreenReplicaSets(activeSvc); err != nil { // If we get an error while trying to scale, the rollout will be requeued // so we can abort this resync - return err + return fmt.Errorf("failed to reconcileBlueGreenReplicaSets in syncReplicasOnly: %w", err) } activeRS, _ := replicasetutil.GetReplicaSetByTemplateHash(c.allRSs, newStatus.BlueGreen.ActiveSelector) if activeRS != nil { @@ -313,7 +310,7 @@ func (c *rolloutContext) syncReplicasOnly() error { if _, err := c.reconcileCanaryReplicaSets(); err != nil { // If we get an error while trying to scale, the rollout will be requeued // so we can abort this resync - return err + return fmt.Errorf("failed to reconcileCanaryReplicaSets in syncReplicasOnly: %w", err) } newStatus.AvailableReplicas = replicasetutil.GetAvailableReplicaCountForReplicaSets(c.allRSs) newStatus.HPAReplicas = replicasetutil.GetActualReplicaCountForReplicaSets(c.allRSs) @@ -326,9 +323,10 @@ func (c *rolloutContext) syncReplicasOnly() error { // // rsList should come from getReplicaSetsForRollout(r). 
func (c *rolloutContext) isScalingEvent() (bool, error) { - _, err := c.getAllReplicaSetsAndSyncRevision(false) + var err error + c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false) if err != nil { - return false, err + return false, fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in isScalingEvent: %w", err) } for _, rs := range controller.FilterActiveReplicaSets(c.allRSs) { @@ -355,6 +353,9 @@ func (c *rolloutContext) scaleReplicaSetAndRecordEvent(rs *appsv1.ReplicaSet, ne scalingOperation = "down" } scaled, newRS, err := c.scaleReplicaSet(rs, newScale, c.rollout, scalingOperation) + if err != nil { + return scaled, newRS, fmt.Errorf("failed to scaleReplicaSet in scaleReplicaSetAndRecordEvent: %w", err) + } return scaled, newRS, err } @@ -365,25 +366,21 @@ func (c *rolloutContext) scaleReplicaSet(rs *appsv1.ReplicaSet, newScale int32, rolloutReplicas := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) annotationsNeedUpdate := annotations.ReplicasAnnotationsNeedUpdate(rs, rolloutReplicas) - scaled := false var err error + scaled := false if sizeNeedsUpdate || annotationsNeedUpdate { rsCopy := rs.DeepCopy() oldScale := defaults.GetReplicasOrDefault(rs.Spec.Replicas) *(rsCopy.Spec.Replicas) = newScale annotations.SetReplicasAnnotations(rsCopy, rolloutReplicas) if fullScaleDown && !c.shouldDelayScaleDownOnAbort() { + // This bypasses the normal call to removeScaleDownDelay and then depends on the removal via an update in updateReplicaSetFallbackToPatch delete(rsCopy.Annotations, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey) } - rs, err = c.kubeclientset.AppsV1().ReplicaSets(rsCopy.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{}) - if err != nil { - return scaled, rs, fmt.Errorf("error updating replicaset %s: %w", rs.Name, err) - } - err = c.replicaSetInformer.GetIndexer().Update(rs) + rs, err = c.updateReplicaSetFallbackToPatch(ctx, rsCopy) if err != nil { - err = fmt.Errorf("error updating replicaset informer in scaleReplicaSet: 
%w", err) - return scaled, rs, err + return scaled, rs, fmt.Errorf("failed to updateReplicaSetFallbackToPatch in scaleReplicaSet: %w", err) } if sizeNeedsUpdate { @@ -573,6 +570,19 @@ func isIndefiniteStep(r *v1alpha1.Rollout) bool { return false } +// isWaitingForReplicaSetScaleDown returns whether or not the rollout still has other replica sets with a scale down deadline annotation +func isWaitingForReplicaSetScaleDown(r *v1alpha1.Rollout, newRS, stableRS *appsv1.ReplicaSet, allRSs []*appsv1.ReplicaSet) bool { + otherRSs := replicasetutil.GetOtherRSs(r, newRS, stableRS, allRSs) + + for _, rs := range otherRSs { + if replicasetutil.HasScaleDownDeadline(rs) { + return true + } + } + + return false +} + func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutStatus) v1alpha1.RolloutStatus { isPaused := len(c.rollout.Status.PauseConditions) > 0 || c.rollout.Spec.Paused isAborted := c.pauseContext.IsAborted() @@ -658,7 +668,8 @@ func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutSt conditions.RemoveRolloutCondition(&newStatus, v1alpha1.RolloutProgressing) } conditions.SetRolloutCondition(&newStatus, *condition) - case !isIndefiniteStep(c.rollout) && conditions.RolloutTimedOut(c.rollout, &newStatus): + case !isIndefiniteStep(c.rollout) && !isWaitingForReplicaSetScaleDown(c.rollout, c.newRS, c.stableRS, c.allRSs) && conditions.RolloutTimedOut(c.rollout, &newStatus): + // Update the rollout with a timeout condition. If the condition already exists, // we ignore this update. 
msg := fmt.Sprintf(conditions.RolloutTimeOutMessage, c.rollout.Name) @@ -1003,6 +1014,7 @@ func (c *rolloutContext) promoteStable(newStatus *v1alpha1.RolloutStatus, reason } } previousStableHash := newStatus.StableRS + revision, _ := replicasetutil.Revision(c.rollout) if previousStableHash != newStatus.CurrentPodHash { // only emit this event when we switched stable if trafficrouting.IsPingPongEnabled(c.rollout) { @@ -1014,9 +1026,17 @@ func (c *rolloutContext) promoteStable(newStatus *v1alpha1.RolloutStatus, reason } newStatus.StableRS = newStatus.CurrentPodHash - revision, _ := replicasetutil.Revision(c.rollout) c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: conditions.RolloutCompletedReason}, conditions.RolloutCompletedMessage, revision, newStatus.CurrentPodHash, reason) } + + if revision == 1 && c.rollout.Status.Phase == v1alpha1.RolloutPhaseHealthy && c.rollout.Spec.WorkloadRef != nil && c.rollout.Spec.WorkloadRef.ScaleDown == v1alpha1.ScaleDownOnSuccess { + var targetScale int32 = 0 + err := c.scaleDeployment(&targetScale) + if err != nil { + return err + } + } + return nil } diff --git a/rollout/sync_test.go b/rollout/sync_test.go index 5748bba0ec..7b2552e6c9 100644 --- a/rollout/sync_test.go +++ b/rollout/sync_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -16,12 +17,15 @@ import ( "k8s.io/utils/pointer" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-rollouts/utils/annotations" "github.com/argoproj/argo-rollouts/utils/conditions" logutil "github.com/argoproj/argo-rollouts/utils/log" "github.com/argoproj/argo-rollouts/utils/record" timeutil "github.com/argoproj/argo-rollouts/utils/time" + + "context" ) func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time, ownerRef 
*metav1.OwnerReference) *appsv1.ReplicaSet { @@ -309,7 +313,7 @@ func TestCanaryPromoteFull(t *testing.T) { r1 := newCanaryRollout("foo", 10, nil, steps, int32Ptr(0), intstr.FromInt(10), intstr.FromInt(0)) r1.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{ RolloutAnalysis: v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -355,14 +359,14 @@ func TestBlueGreenPromoteFull(t *testing.T) { r1 := newBlueGreenRollout("foo", 10, nil, "active", "preview") r1.Spec.Strategy.BlueGreen.AutoPromotionEnabled = pointer.BoolPtr(false) r1.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, }, } r1.Spec.Strategy.BlueGreen.PostPromotionAnalysis = &v1alpha1.RolloutAnalysis{ - Templates: []v1alpha1.RolloutAnalysisTemplate{ + Templates: []v1alpha1.AnalysisTemplateRef{ { TemplateName: at.Name, }, @@ -451,7 +455,7 @@ func TestSendStateChangeEvents(t *testing.T) { recorder := record.NewFakeEventRecorder() roCtx.recorder = recorder roCtx.sendStateChangeEvents(&test.prevStatus, &test.newStatus) - assert.Equal(t, test.expectedEventReasons, recorder.Events) + assert.Equal(t, test.expectedEventReasons, recorder.Events()) } } @@ -610,3 +614,28 @@ func Test_shouldFullPromote(t *testing.T) { result = ctx.shouldFullPromote(newStatus) assert.Equal(t, result, "Rollback within window") } + +func TestScaleDownDeploymentOnSuccess(t *testing.T) { + ctx := createScaleDownRolloutContext(v1alpha1.ScaleDownOnSuccess, 5, true, nil) + newStatus := &v1alpha1.RolloutStatus{ + CurrentPodHash: "2f646bf702", + StableRS: "15fb5ffc01", + } + err := ctx.promoteStable(newStatus, "reason") + + assert.Nil(t, err) + k8sfakeClient := ctx.kubeclientset.(*k8sfake.Clientset) + updatedDeployment, err := 
k8sfakeClient.AppsV1().Deployments("default").Get(context.TODO(), "workload-test", metav1.GetOptions{}) + assert.Nil(t, err) + assert.Equal(t, int32(0), *updatedDeployment.Spec.Replicas) + + // test scale deployment error + ctx = createScaleDownRolloutContext(v1alpha1.ScaleDownOnSuccess, 5, false, nil) + newStatus = &v1alpha1.RolloutStatus{ + CurrentPodHash: "2f646bf702", + StableRS: "15fb5ffc01", + } + err = ctx.promoteStable(newStatus, "reason") + + assert.NotNil(t, err) +} diff --git a/rollout/templateref.go b/rollout/templateref.go index a8faf92b8b..15dab6a27d 100644 --- a/rollout/templateref.go +++ b/rollout/templateref.go @@ -75,7 +75,7 @@ func NewInformerBasedWorkloadRefResolver( ) *informerBasedTemplateResolver { ctx, cancelContext := context.WithCancel(context.TODO()) err := rolloutsInformer.AddIndexers(cache.Indexers{ - templateRefIndexName: func(obj interface{}) ([]string, error) { + templateRefIndexName: func(obj any) ([]string, error) { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil && ro.Spec.WorkloadRef != nil { return []string{refKey(*ro.Spec.WorkloadRef, ro.Namespace)}, nil } @@ -115,7 +115,7 @@ func (r *informerBasedTemplateResolver) Stop() { r.cancelContext = cancelContext } -func remarshalMap(objMap map[string]interface{}, res interface{}) error { +func remarshalMap(objMap map[string]any, res any) error { data, err := json.Marshal(objMap) if err != nil { return err @@ -210,13 +210,13 @@ func (r *informerBasedTemplateResolver) newInformerForGVK(gvk schema.GroupVersio cache.Indexers{}, nil) informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { r.updateRolloutsReferenceAnnotation(obj, gvk) }, - UpdateFunc: func(oldObj, newObj interface{}) { + UpdateFunc: func(oldObj, newObj any) { r.updateRolloutsReferenceAnnotation(newObj, gvk) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { r.updateRolloutsReferenceAnnotation(obj, gvk) }, }) @@ -225,7 
+225,7 @@ func (r *informerBasedTemplateResolver) newInformerForGVK(gvk schema.GroupVersio } // updateRolloutsReferenceAnnotation update the annotation of all rollouts referenced by given object -func (r *informerBasedTemplateResolver) updateRolloutsReferenceAnnotation(obj interface{}, gvk schema.GroupVersionKind) { +func (r *informerBasedTemplateResolver) updateRolloutsReferenceAnnotation(obj any, gvk schema.GroupVersionKind) { workloadMeta, err := meta.Accessor(obj) if err != nil { return @@ -247,9 +247,9 @@ func (r *informerBasedTemplateResolver) updateRolloutsReferenceAnnotation(obj in updated := annotations.SetRolloutWorkloadRefGeneration(ro, generation) if updated { - patch := map[string]interface{}{ - "metadata": map[string]interface{}{ - "annotations": map[string]interface{}{ + patch := map[string]any{ + "metadata": map[string]any{ + "annotations": map[string]any{ annotations.WorkloadGenerationAnnotation: ro.Annotations[annotations.WorkloadGenerationAnnotation], }, }, diff --git a/rollout/trafficrouting.go b/rollout/trafficrouting.go index bf03d95ef1..db959ad852 100644 --- a/rollout/trafficrouting.go +++ b/rollout/trafficrouting.go @@ -28,6 +28,7 @@ import ( "github.com/argoproj/argo-rollouts/utils/record" replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" rolloututil "github.com/argoproj/argo-rollouts/utils/rollout" + "github.com/argoproj/argo-rollouts/utils/weightutil" ) // NewTrafficRoutingReconciler identifies return the TrafficRouting Plugin that the rollout wants to modify @@ -134,6 +135,7 @@ func (c *Controller) NewTrafficRoutingReconciler(roCtx *rolloutContext) ([]traff return nil, nil } +// this is currently only used in the canary strategy func (c *rolloutContext) reconcileTrafficRouting() error { reconcilers, err := c.newTrafficRoutingReconciler(c) // a return here does ensure that all trafficReconcilers are healthy @@ -181,13 +183,15 @@ func (c *rolloutContext) reconcileTrafficRouting() error { desiredWeight =
c.calculateDesiredWeightOnAbortOrStableRollback() if (c.rollout.Spec.Strategy.Canary.DynamicStableScale && desiredWeight == 0) || !c.rollout.Spec.Strategy.Canary.DynamicStableScale { // If we are using dynamic stable scale we need to also make sure that desiredWeight=0 aka we are completely - // done with aborting before resetting the canary service selectors back to stable - err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.stableRS, true) + // done with aborting before resetting the canary service selectors back to stable. For non-dynamic scale we do not check for availability because we are + // fully aborted and stable pods will be there, if we check for availability it causes issues with ALB readiness gates if all stable pods + // have the desired readiness gate on them during an abort we get stuck in a loop because all the stable go unready and rollouts won't be able + // to switch the desired services because there is no ready pods which causes pods to get stuck progressing forever waiting for readiness. + err = c.ensureSVCTargets(c.rollout.Spec.Strategy.Canary.CanaryService, c.stableRS, false) if err != nil { return err } } - err := reconciler.RemoveManagedRoutes() if err != nil { return err @@ -201,7 +205,7 @@ func (c *rolloutContext) reconcileTrafficRouting() error { // But we can only increase canary weight according to available replica counts of the canary. // we will need to set the desiredWeight to 0 when the newRS is not available. 
if c.rollout.Spec.Strategy.Canary.DynamicStableScale { - desiredWeight = (100 * c.newRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas + desiredWeight = (weightutil.MaxTrafficWeight(c.rollout) * c.newRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas } else if c.rollout.Status.Canary.Weights != nil { desiredWeight = c.rollout.Status.Canary.Weights.Canary.Weight } @@ -229,7 +233,7 @@ func (c *rolloutContext) reconcileTrafficRouting() error { desiredWeight = replicasetutil.GetCurrentSetWeight(c.rollout) weightDestinations = append(weightDestinations, c.calculateWeightDestinationsFromExperiment()...) } else { - desiredWeight = 100 + desiredWeight = weightutil.MaxTrafficWeight(c.rollout) } } // We need to check for revision > 1 because when we first install the rollout we run step 0 this prevents that. @@ -288,6 +292,12 @@ func (c *rolloutContext) reconcileTrafficRouting() error { logCtx := logutil.WithRollout(c.rollout) logCtx.Info("rollout enqueue due to trafficrouting") c.enqueueRolloutAfter(c.rollout, defaults.GetRolloutVerifyRetryInterval()) + // At the end of the rollout we need to verify the weight is correct, and return an error if not because we don't want the rest of the + // reconcile process to continue. We don't need to do this if we are in the middle of the rollout because the rest of the reconcile + // process won't scale down the old replicasets yet due to being in the middle of some steps. + if desiredWeight == weightutil.MaxTrafficWeight(c.rollout) && len(c.rollout.Spec.Strategy.Canary.Steps) >= int(*c.rollout.Status.CurrentStepIndex) { + return fmt.Errorf("end of rollout, desired weight %d not yet verified", desiredWeight) + } } } } @@ -305,7 +315,7 @@ func (c *rolloutContext) calculateDesiredWeightOnAbortOrStableRollback() int32 { } // When using dynamic stable scaling, we must dynamically decreasing the weight to the canary // according to the availability of the stable (whatever it can support). 
- desiredWeight := 100 - ((100 * c.stableRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas) + desiredWeight := maxInt(0, weightutil.MaxTrafficWeight(c.rollout)-((weightutil.MaxTrafficWeight(c.rollout)*c.stableRS.Status.AvailableReplicas) / *c.rollout.Spec.Replicas)) if c.rollout.Status.Canary.Weights != nil { // This ensures that if we are already at a lower weight, then we will not // increase the weight because stable availability is flapping (e.g. pod restarts) @@ -338,7 +348,7 @@ func calculateWeightStatus(ro *v1alpha1.Rollout, canaryHash, stableHash string, ServiceName: ro.Spec.Strategy.Canary.CanaryService, }, } - stableWeight := 100 - desiredWeight + stableWeight := weightutil.MaxTrafficWeight(ro) - desiredWeight for _, weightDest := range weightDestinations { weights.Additional = append(weights.Additional, weightDest) stableWeight -= weightDest.Weight @@ -371,11 +381,13 @@ func (c *rolloutContext) calculateWeightDestinationsFromExperiment() []v1alpha1. } for _, templateStatus := range c.currentEx.Status.TemplateStatuses { templateWeight := getTemplateWeight(templateStatus.Name) - weightDestinations = append(weightDestinations, v1alpha1.WeightDestination{ - ServiceName: templateStatus.ServiceName, - PodTemplateHash: templateStatus.PodTemplateHash, - Weight: *templateWeight, - }) + if templateWeight != nil { + weightDestinations = append(weightDestinations, v1alpha1.WeightDestination{ + ServiceName: templateStatus.ServiceName, + PodTemplateHash: templateStatus.PodTemplateHash, + Weight: *templateWeight, + }) + } } } return weightDestinations diff --git a/rollout/trafficrouting/alb/alb.go b/rollout/trafficrouting/alb/alb.go index b1b9ff0f25..0a053857d6 100644 --- a/rollout/trafficrouting/alb/alb.go +++ b/rollout/trafficrouting/alb/alb.go @@ -202,7 +202,7 @@ func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations .. 
return nil, nil } - if !rolloututil.ShouldVerifyWeight(r.cfg.Rollout) { + if !rolloututil.ShouldVerifyWeight(r.cfg.Rollout, desiredWeight) { // If we should not verify weight but the ALB status has not been set yet due to a Rollout resource just being // installed in the cluster we want to actually run the rest of the function, so we do not return if // r.cfg.Rollout.Status.ALB is nil. However, if we should not verify, and we have already updated the status once @@ -242,7 +242,7 @@ func (r *Reconciler) VerifyWeightPerIngress(desiredWeight int32, ingresses []str } resourceIDToDest := map[string]v1alpha1.WeightDestination{} - stableService, canaryService := trafficrouting.GetStableAndCanaryServices(rollout) + stableService, canaryService := trafficrouting.GetStableAndCanaryServices(rollout, true) canaryResourceID := aws.BuildTargetGroupResourceID(rollout.Namespace, ingress.GetName(), canaryService, rollout.Spec.Strategy.Canary.TrafficRouting.ALB.ServicePort) stableResourceID := aws.BuildTargetGroupResourceID(rollout.Namespace, ingress.GetName(), stableService, rollout.Spec.Strategy.Canary.TrafficRouting.ALB.ServicePort) @@ -251,22 +251,22 @@ func (r *Reconciler) VerifyWeightPerIngress(desiredWeight int32, ingresses []str resourceIDToDest[resourceID] = dest } - loadBalancerStatus := ingress.GetLoadBalancerStatus() - if len(loadBalancerStatus.Ingress) == 0 { + hostnames := ingress.GetLoadBalancerHostnames() + if len(hostnames) == 0 { r.log.Infof("LoadBalancer not yet allocated") } - for _, lbIngress := range loadBalancerStatus.Ingress { - if lbIngress.Hostname == "" { + for _, hostname := range hostnames { + if hostname == "" { continue } - lb, err := r.aws.FindLoadBalancerByDNSName(ctx, lbIngress.Hostname) + lb, err := r.aws.FindLoadBalancerByDNSName(ctx, hostname) if err != nil { r.cfg.Recorder.Warnf(rollout, record.EventOptions{EventReason: conditions.TargetGroupVerifyErrorReason}, conditions.TargetGroupVerifyErrorMessage, canaryService, "unknown", err.Error()) 
return pointer.Bool(false), err } if lb == nil || lb.LoadBalancerArn == nil { - r.cfg.Recorder.Warnf(rollout, record.EventOptions{EventReason: conditions.LoadBalancerNotFoundReason}, conditions.LoadBalancerNotFoundMessage, lbIngress.Hostname) + r.cfg.Recorder.Warnf(rollout, record.EventOptions{EventReason: conditions.LoadBalancerNotFoundReason}, conditions.LoadBalancerNotFoundMessage, hostname) return pointer.Bool(false), nil } @@ -347,7 +347,7 @@ func updateTargetGroupStatus(status *v1alpha1.ALBStatus, tg *aws.TargetGroupMeta } func getForwardActionString(r *v1alpha1.Rollout, port int32, desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (string, error) { - stableService, canaryService := trafficrouting.GetStableAndCanaryServices(r) + stableService, canaryService := trafficrouting.GetStableAndCanaryServices(r, true) portStr := strconv.Itoa(int(port)) stableWeight := int32(100) targetGroups := make([]ingressutil.ALBTargetGroup, 0) @@ -479,7 +479,7 @@ func removeValue(array []string, key string) []string { } func getTrafficForwardActionString(r *v1alpha1.Rollout, port int32) (string, error) { - _, canaryService := trafficrouting.GetStableAndCanaryServices(r) + _, canaryService := trafficrouting.GetStableAndCanaryServices(r, true) portStr := strconv.Itoa(int(port)) weight := int64(100) targetGroups := make([]ingressutil.ALBTargetGroup, 0) @@ -515,17 +515,28 @@ func getTrafficForwardActionString(r *v1alpha1.Rollout, port int32) (string, err return string(bytes), nil } +// Two exact matches with the same header name should be merged into the values array of the same condition +func upsertCondition(res []ingressutil.ALBCondition, match v1alpha1.HeaderRoutingMatch) []ingressutil.ALBCondition { + for i, condition := range res { + if condition.HttpHeaderConfig.HttpHeaderName == match.HeaderName { + res[i].HttpHeaderConfig.Values = append(res[i].HttpHeaderConfig.Values, match.HeaderValue.Exact) + return res + } + } + condition := 
ingressutil.ALBCondition{ + Field: "http-header", + HttpHeaderConfig: ingressutil.HttpHeaderConfig{ + HttpHeaderName: match.HeaderName, + Values: []string{match.HeaderValue.Exact}, + }, + } + return append(res, condition) +} + func getTrafficForwardConditionString(headerRoute *v1alpha1.SetHeaderRoute) (string, error) { var res []ingressutil.ALBCondition for _, match := range headerRoute.Match { - condition := ingressutil.ALBCondition{ - Field: "http-header", - HttpHeaderConfig: ingressutil.HttpHeaderConfig{ - HttpHeaderName: match.HeaderName, - Values: []string{match.HeaderValue.Exact}, - }, - } - res = append(res, condition) + res = upsertCondition(res, match) } bytes := jsonutil.MustMarshal(res) return string(bytes), nil diff --git a/rollout/trafficrouting/alb/alb_test.go b/rollout/trafficrouting/alb/alb_test.go index e81b456941..faa798e524 100644 --- a/rollout/trafficrouting/alb/alb_test.go +++ b/rollout/trafficrouting/alb/alb_test.go @@ -7,15 +7,15 @@ import ( "strings" "testing" + networkingv1 "k8s.io/api/networking/v1" + elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" k8stesting "k8s.io/client-go/testing" @@ -125,7 +125,7 @@ func albActionAnnotation(stable string) string { return fmt.Sprintf("%s%s%s", ingressutil.ALBIngressAnnotation, ingressutil.ALBActionPrefix, stable) } -func ingress(name, stableSvc, canarySvc, actionService string, port, weight int32, managedBy string, includeStickyConfig bool) *extensionsv1beta1.Ingress { +func ingress(name, stableSvc, canarySvc, actionService string, port, weight int32, managedBy string, 
includeStickyConfig bool) *networkingv1.Ingress { managedByValue := ingressutil.ManagedALBAnnotations{ managedBy: ingressutil.ManagedALBAnnotation{albActionAnnotation(actionService)}, } @@ -139,7 +139,7 @@ func ingress(name, stableSvc, canarySvc, actionService string, port, weight int3 panic(err) } - i := &extensionsv1beta1.Ingress{ + i := &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: metav1.NamespaceDefault, @@ -148,16 +148,22 @@ func ingress(name, stableSvc, canarySvc, actionService string, port, weight int3 ingressutil.ManagedAnnotations: managedByValue.String(), }, }, - Spec: extensionsv1beta1.IngressSpec{ - Rules: []extensionsv1beta1.IngressRule{ + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: []extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: actionService, - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: actionService, + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + Number: 0, + }, + }, + Resource: nil, }, }, }, @@ -207,7 +213,7 @@ func TestIngressNotFound(t *testing.T) { ro := fakeRollout("stable-service", "canary-service", nil, "stable-ingress", 443) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -227,7 +233,7 @@ func TestIngressNotFoundMultiIngress(t *testing.T) { ro := 
fakeRolloutWithMultiIngress("stable-service", "canary-service", nil, []string{"stable-ingress", "multi-ingress"}, 443) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -250,7 +256,7 @@ func TestServiceNotFoundInIngress(t *testing.T) { client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -274,8 +280,8 @@ func TestServiceNotFoundInMultiIngress(t *testing.T) { client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -296,8 +302,8 @@ func TestNoChanges(t *testing.T) { i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 10, ro.Name, false) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + 
k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -320,9 +326,9 @@ func TestNoChangesMultiIngress(t *testing.T) { mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 10, ro.Name, false) client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -345,8 +351,8 @@ func TestErrorOnInvalidManagedBy(t *testing.T) { i.Annotations[ingressutil.ManagedAnnotations] = "test" client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -369,9 +375,9 @@ func TestErrorOnInvalidManagedByMultiIngress(t *testing.T) { mi.Annotations[ingressutil.ManagedAnnotations] = "test" client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := 
ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -393,8 +399,8 @@ func TestSetInitialDesiredWeight(t *testing.T) { i.Annotations = map[string]string{} client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -418,9 +424,9 @@ func TestSetInitialDesiredWeightMultiIngress(t *testing.T) { i.Annotations = map[string]string{} client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -446,8 +452,8 @@ func TestSetWeightPingPong(t *testing.T) { i.Annotations = map[string]string{} client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := 
ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -475,9 +481,9 @@ func TestSetWeightPingPongMultiIngress(t *testing.T) { i.Annotations = map[string]string{} client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -500,8 +506,8 @@ func TestUpdateDesiredWeightWithStickyConfig(t *testing.T) { i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, true) client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) assert.Nil(t, err) r, err := NewReconciler(ReconcilerConfig{ Rollout: ro, @@ -522,9 +528,9 @@ func TestUpdateDesiredWeightWithStickyConfigMultiIngress(t *testing.T) { mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, true) client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - 
k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) assert.Nil(t, err) r, err := NewReconciler(ReconcilerConfig{ Rollout: ro, @@ -544,8 +550,8 @@ func TestUpdateDesiredWeight(t *testing.T) { i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -568,9 +574,9 @@ func TestUpdateDesiredWeightMultiIngress(t *testing.T) { mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -646,8 +652,8 @@ 
func TestErrorPatching(t *testing.T) { client := fake.NewSimpleClientset(i) client.ReactionChain = nil k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -677,9 +683,9 @@ func TestErrorPatchingMultiIngress(t *testing.T) { client := fake.NewSimpleClientset(i, mi) client.ReactionChain = nil k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -800,8 +806,9 @@ func TestVerifyWeight(t *testing.T) { SetWeight: pointer.Int32Ptr(10), }} i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) - i.Status.LoadBalancer = corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + + i.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { Hostname: "verify-weight-test-abc-123.us-west-2.elb.amazonaws.com", }, @@ -810,8 +817,8 @@ func TestVerifyWeight(t *testing.T) { client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := 
ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -911,6 +918,7 @@ func TestVerifyWeight(t *testing.T) { { var status v1alpha1.RolloutStatus r, fakeClient := newFakeReconciler(&status) + fakeClient.loadBalancers = []*elbv2types.LoadBalancer{ { LoadBalancerName: pointer.StringPtr("lb-abc123-name"), @@ -948,6 +956,48 @@ func TestVerifyWeight(t *testing.T) { assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus("ingress")) } + // LoadBalancer found, at max weight, end of rollout + { + var status v1alpha1.RolloutStatus + status.CurrentStepIndex = pointer.Int32Ptr(2) + r, fakeClient := newFakeReconciler(&status) + fakeClient.loadBalancers = []*elbv2types.LoadBalancer{ + { + LoadBalancerName: pointer.StringPtr("lb-abc123-name"), + LoadBalancerArn: pointer.StringPtr("arn:aws:elasticloadbalancing:us-east-2:123456789012:loadbalancer/app/lb-abc123-name/1234567890123456"), + DNSName: pointer.StringPtr("verify-weight-test-abc-123.us-west-2.elb.amazonaws.com"), + }, + } + fakeClient.targetGroups = []aws.TargetGroupMeta{ + { + TargetGroup: elbv2types.TargetGroup{ + TargetGroupName: pointer.StringPtr("canary-tg-abc123-name"), + TargetGroupArn: pointer.StringPtr("arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/canary-tg-abc123-name/1234567890123456"), + }, + Weight: pointer.Int32Ptr(100), + Tags: map[string]string{ + aws.AWSLoadBalancerV2TagKeyResourceID: "default/ingress-canary-svc:443", + }, + }, + { + TargetGroup: elbv2types.TargetGroup{ + TargetGroupName: pointer.StringPtr("stable-tg-abc123-name"), + TargetGroupArn: pointer.StringPtr("arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/stable-tg-abc123-name/1234567890123456"), + }, + Weight: pointer.Int32Ptr(0), + Tags: map[string]string{ + 
aws.AWSLoadBalancerV2TagKeyResourceID: "default/ingress-stable-svc:443", + }, + }, + } + + weightVerified, err := r.VerifyWeight(100) + assert.NoError(t, err) + assert.True(t, *weightVerified) + assert.Equal(t, status.ALBs[0], *status.ALB) + assert.Equal(t, *status.ALB, *fakeClient.getAlbStatus("ingress")) + } + // LoadBalancer found, but ARNs are unparsable { var status v1alpha1.RolloutStatus @@ -1001,15 +1051,15 @@ func TestVerifyWeightMultiIngress(t *testing.T) { }} i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 5, ro.Name, false) - i.Status.LoadBalancer = corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + i.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { Hostname: "verify-weight-test-abc-123.us-west-2.elb.amazonaws.com", }, }, } - mi.Status.LoadBalancer = corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + mi.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { Hostname: "verify-weight-multi-ingress.us-west-2.elb.amazonaws.com", }, @@ -1018,9 +1068,9 @@ func TestVerifyWeightMultiIngress(t *testing.T) { client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1224,8 +1274,8 @@ func TestSetWeightWithMultipleBackends(t *testing.T) 
{ i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 0, ro.Name, false) client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1272,9 +1322,9 @@ func TestSetWeightWithMultipleBackendsMultiIngress(t *testing.T) { mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 0, ro.Name, false) client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1337,8 +1387,8 @@ func TestVerifyWeightWithAdditionalDestinations(t *testing.T) { i := ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 0, ro.Name, false) i.Annotations["alb.ingress.kubernetes.io/actions.stable-svc"] = fmt.Sprintf(actionTemplateWithExperiments, CANARY_SVC, 443, 10, weightDestinations[0].ServiceName, 443, weightDestinations[0].Weight, weightDestinations[1].ServiceName, 443, weightDestinations[1].Weight, STABLE_SVC, 443, 85) - i.Status.LoadBalancer = corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + i.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: 
[]networkingv1.IngressLoadBalancerIngress{ { Hostname: "verify-weight-test-abc-123.us-west-2.elb.amazonaws.com", }, @@ -1347,8 +1397,8 @@ func TestVerifyWeightWithAdditionalDestinations(t *testing.T) { client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1552,15 +1602,15 @@ func TestVerifyWeightWithAdditionalDestinationsMultiIngress(t *testing.T) { i.Annotations["alb.ingress.kubernetes.io/actions.stable-svc"] = fmt.Sprintf(actionTemplateWithExperiments, CANARY_SVC, 443, 10, weightDestinations[0].ServiceName, 443, weightDestinations[0].Weight, weightDestinations[1].ServiceName, 443, weightDestinations[1].Weight, STABLE_SVC, 443, 85) mi.Annotations["alb.ingress.kubernetes.io/actions.stable-svc"] = fmt.Sprintf(actionTemplateWithExperiments, CANARY_SVC, 443, 10, weightDestinations[0].ServiceName, 443, weightDestinations[0].Weight, weightDestinations[1].ServiceName, 443, weightDestinations[1].Weight, STABLE_SVC, 443, 85) - i.Status.LoadBalancer = corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + i.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { Hostname: "verify-weight-test-abc-123.us-west-2.elb.amazonaws.com", }, }, } - mi.Status.LoadBalancer = corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + mi.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { Hostname: "verify-weight-multi-ingress.us-west-2.elb.amazonaws.com", }, @@ -1569,9 +1619,9 @@ func 
TestVerifyWeightWithAdditionalDestinationsMultiIngress(t *testing.T) { client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1835,8 +1885,8 @@ func TestSetHeaderRoute(t *testing.T) { i := ingress("ingress", STABLE_SVC, CANARY_SVC, "action1", 443, 10, ro.Name, false) client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1866,6 +1916,108 @@ func TestSetHeaderRoute(t *testing.T) { assert.Len(t, client.Actions(), 1) } +func TestSetHeaderRouteWithDifferentHeaderNames(t *testing.T) { + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = []v1alpha1.MangedRoutes{ + {Name: "header-route"}, + } + + i := ingress("ingress", STABLE_SVC, CANARY_SVC, "action1", 443, 10, ro.Name, false) + client := fake.NewSimpleClientset(i) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + + ingressWrapper, err := 
ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) + if err != nil { + t.Fatal(err) + } + + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + + err = r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "header-route", + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "origin", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "https://www.fake-origin1.com", + }, + }, + { + HeaderName: "Agent", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "Chrome", + }, + }, + }, + }) + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) + + // no managed routes, no changes expected + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) +} + +func TestSetHeaderRouteWithDuplicateHeaderNameMatches(t *testing.T) { + ro := fakeRollout(STABLE_SVC, CANARY_SVC, nil, "ingress", 443) + ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = []v1alpha1.MangedRoutes{ + {Name: "header-route"}, + } + + i := ingress("ingress", STABLE_SVC, CANARY_SVC, "action1", 443, 10, ro.Name, false) + client := fake.NewSimpleClientset(i) + k8sI := kubeinformers.NewSharedInformerFactory(client, 0) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) + if err != nil { + t.Fatal(err) + } + + r, err := NewReconciler(ReconcilerConfig{ + Rollout: ro, + Client: client, + Recorder: record.NewFakeEventRecorder(), + ControllerKind: schema.GroupVersionKind{Group: "foo", Version: "v1", Kind: "Bar"}, + IngressWrapper: ingressWrapper, + }) + assert.NoError(t, err) + + err = r.SetHeaderRoute(&v1alpha1.SetHeaderRoute{ + Name: "header-route", + Match: []v1alpha1.HeaderRoutingMatch{ + { + HeaderName: "origin", + 
HeaderValue: &v1alpha1.StringMatch{ + Exact: "https://www.fake-origin1.com", + }, + }, + { + HeaderName: "origin", + HeaderValue: &v1alpha1.StringMatch{ + Exact: "https://www.fake-origin2.com", + }, + }, + }, + }) + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) + + // no managed routes, no changes expected + err = r.RemoveManagedRoutes() + assert.Nil(t, err) + assert.Len(t, client.Actions(), 1) +} + func TestSetHeaderRouteMultiIngress(t *testing.T) { ro := fakeRolloutWithMultiIngress(STABLE_SVC, CANARY_SVC, nil, []string{"ingress", "multi-ingress"}, 443) ro.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes = []v1alpha1.MangedRoutes{ @@ -1875,9 +2027,9 @@ func TestSetHeaderRouteMultiIngress(t *testing.T) { mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, "action2", 443, 10, ro.Name, false) client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1923,15 +2075,21 @@ func TestRemoveManagedRoutes(t *testing.T) { i.Annotations["alb.ingress.kubernetes.io/actions.header-route"] = "{}" i.Annotations["alb.ingress.kubernetes.io/conditions.header-route"] = "{}" i.Annotations[ingressutil.ManagedAnnotations] = managedByValue.String() - i.Spec.Rules = []extensionsv1beta1.IngressRule{ + i.Spec.Rules = []networkingv1.IngressRule{ { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: []extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: 
networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: "action1", - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "action1", + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + Number: 0, + }, + }, + Resource: nil, }, }, }, @@ -1939,13 +2097,19 @@ func TestRemoveManagedRoutes(t *testing.T) { }, }, { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: []extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: "header-route", - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "header-route", + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + Number: 0, + }, + }, + Resource: nil, }, }, }, @@ -1956,8 +2120,8 @@ func TestRemoveManagedRoutes(t *testing.T) { client := fake.NewSimpleClientset(i) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -1998,15 +2162,21 @@ func TestRemoveManagedRoutesMultiIngress(t *testing.T) { i.Annotations["alb.ingress.kubernetes.io/actions.header-route"] = "{}" i.Annotations["alb.ingress.kubernetes.io/conditions.header-route"] = "{}" i.Annotations[ingressutil.ManagedAnnotations] = 
managedByValue.String() - i.Spec.Rules = []extensionsv1beta1.IngressRule{ + i.Spec.Rules = []networkingv1.IngressRule{ { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: []extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: "action1", - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "action1", + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + Number: 0, + }, + }, + Resource: nil, }, }, }, @@ -2014,13 +2184,19 @@ func TestRemoveManagedRoutesMultiIngress(t *testing.T) { }, }, { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: []extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: "header-route", - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "header-route", + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + Number: 0, + }, + }, + Resource: nil, }, }, }, @@ -2032,15 +2208,20 @@ func TestRemoveManagedRoutesMultiIngress(t *testing.T) { mi.Annotations["alb.ingress.kubernetes.io/actions.header-route"] = "{}" mi.Annotations["alb.ingress.kubernetes.io/conditions.header-route"] = "{}" mi.Annotations[ingressutil.ManagedAnnotations] = managedByValue.String() - mi.Spec.Rules = []extensionsv1beta1.IngressRule{ + mi.Spec.Rules = []networkingv1.IngressRule{ { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: 
[]extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: "action1", - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "action1", + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + }, + }, + Resource: nil, }, }, }, @@ -2048,13 +2229,18 @@ func TestRemoveManagedRoutesMultiIngress(t *testing.T) { }, }, { - IngressRuleValue: extensionsv1beta1.IngressRuleValue{ - HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ - Paths: []extensionsv1beta1.HTTPIngressPath{ + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ { - Backend: extensionsv1beta1.IngressBackend{ - ServiceName: "header-route", - ServicePort: intstr.Parse("use-annotation"), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "header-route", + Port: networkingv1.ServiceBackendPort{ + Name: "use-annotation", + }, + }, + Resource: nil, }, }, }, @@ -2065,9 +2251,9 @@ func TestRemoveManagedRoutesMultiIngress(t *testing.T) { client := fake.NewSimpleClientset(i, mi) k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -2096,8 +2282,8 @@ func TestSetMirrorRoute(t *testing.T) { i := 
ingress("ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 10, ro.Name, false) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } @@ -2128,9 +2314,9 @@ func TestSetMirrorRouteMultiIngress(t *testing.T) { mi := ingress("multi-ingress", STABLE_SVC, CANARY_SVC, STABLE_SVC, 443, 10, ro.Name, false) client := fake.NewSimpleClientset() k8sI := kubeinformers.NewSharedInformerFactory(client, 0) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(i) - k8sI.Extensions().V1beta1().Ingresses().Informer().GetIndexer().Add(mi) - ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeExtensions, client, k8sI) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(i) + k8sI.Networking().V1().Ingresses().Informer().GetIndexer().Add(mi) + ingressWrapper, err := ingressutil.NewIngressWrapper(ingressutil.IngressModeNetworking, client, k8sI) if err != nil { t.Fatal(err) } diff --git a/rollout/trafficrouting/ambassador/ambassador.go b/rollout/trafficrouting/ambassador/ambassador.go index 5c50f38f26..ceb389c81a 100644 --- a/rollout/trafficrouting/ambassador/ambassador.go +++ b/rollout/trafficrouting/ambassador/ambassador.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "strconv" "strings" "sync" "time" @@ -218,7 +217,8 @@ func (r *Reconciler) createCanaryMapping(ctx context.Context, } canarySvc := r.Rollout.Spec.Strategy.Canary.CanaryService - canaryMapping := buildCanaryMapping(baseMapping, canarySvc, desiredWeight) + stableService := r.Rollout.Spec.Strategy.Canary.StableService + canaryMapping := 
buildCanaryMapping(baseMapping, canarySvc, stableService, desiredWeight) _, err = client.Create(ctx, canaryMapping, metav1.CreateOptions{}) if err != nil { msg := fmt.Sprintf("Error creating canary mapping: %s", err) @@ -227,9 +227,9 @@ func (r *Reconciler) createCanaryMapping(ctx context.Context, return err } -func buildCanaryMapping(baseMapping *unstructured.Unstructured, canarySvc string, desiredWeight int32) *unstructured.Unstructured { +func buildCanaryMapping(baseMapping *unstructured.Unstructured, canarySvc string, stableService string, desiredWeight int32) *unstructured.Unstructured { canaryMapping := baseMapping.DeepCopy() - svc := buildCanaryService(baseMapping, canarySvc) + svc := buildCanaryService(baseMapping, canarySvc, stableService) unstructured.RemoveNestedField(canaryMapping.Object, "metadata") cMappingName := buildCanaryMappingName(baseMapping.GetName()) canaryMapping.SetName(cMappingName) @@ -239,19 +239,9 @@ func buildCanaryMapping(baseMapping *unstructured.Unstructured, canarySvc string return canaryMapping } -func buildCanaryService(baseMapping *unstructured.Unstructured, canarySvc string) string { +func buildCanaryService(baseMapping *unstructured.Unstructured, canarySvc string, stableService string) string { curSvc := GetMappingService(baseMapping) - parts := strings.Split(curSvc, ":") - if len(parts) < 2 { - return canarySvc - } - // Check if the last part is a valid int that can be used as the port - port := parts[len(parts)-1] - if _, err := strconv.Atoi(port); err != nil { - return canarySvc - - } - return fmt.Sprintf("%s:%s", canarySvc, port) + return strings.Replace(curSvc, stableService, canarySvc, 1) } func (r *Reconciler) VerifyWeight(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) (*bool, error) { diff --git a/rollout/trafficrouting/ambassador/ambassador_test.go b/rollout/trafficrouting/ambassador/ambassador_test.go index bd6e4613de..ee1ac347e3 100644 --- 
a/rollout/trafficrouting/ambassador/ambassador_test.go +++ b/rollout/trafficrouting/ambassador/ambassador_test.go @@ -31,7 +31,7 @@ metadata: spec: prefix: /myapp/ rewrite: /myapp/ - service: myapp:8080` + service: main-service:8080` baseMappingNoPort = ` apiVersion: getambassador.io/v2 @@ -42,7 +42,7 @@ metadata: spec: prefix: /myapp/ rewrite: /myapp/ - service: myapp` + service: main-service` baseMappingWithWeight = ` apiVersion: getambassador.io/v2 @@ -53,7 +53,7 @@ metadata: spec: prefix: /myapp/ rewrite: /myapp/ - service: myapp:8080 + service: main-service:8080 weight: 20` baseV3Mapping = ` @@ -66,7 +66,7 @@ spec: hostname: 'example.com' prefix: /myapp/ rewrite: /myapp/ - service: myapp:8080` + service: main-service:8080` canaryMapping = ` apiVersion: getambassador.io/v2 @@ -77,7 +77,7 @@ metadata: spec: prefix: /myapp/ rewrite: /myapp/ - service: myapp:8080 + service: main-service:8080 weight: 20` canaryMappingWithZeroWeight = ` @@ -89,7 +89,7 @@ metadata: spec: prefix: /myapp/ rewrite: /myapp/ - service: myapp:8080 + service: main-service:8080 weight: 0` ) @@ -136,8 +136,9 @@ type getReturn struct { func (f *fakeClient) Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { invokation := &getInvokation{name: name} f.mu.Lock() + defer f.mu.Unlock() f.getInvokations = append(f.getInvokations, invokation) - f.mu.Unlock() + if len(f.getReturns) == 0 { return nil, nil } @@ -145,7 +146,8 @@ func (f *fakeClient) Get(ctx context.Context, name string, options metav1.GetOpt if len(f.getReturns) >= len(f.getInvokations) { ret = f.getReturns[len(f.getInvokations)-1] } - return ret.obj, ret.err + // We clone the object before returning it, to prevent modification of the fake object in memory by the calling function + return ret.obj.DeepCopy(), ret.err } func (f *fakeClient) Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) 
(*unstructured.Unstructured, error) { diff --git a/rollout/trafficrouting/apisix/apisix.go b/rollout/trafficrouting/apisix/apisix.go index 1147721a39..6269cc698e 100644 --- a/rollout/trafficrouting/apisix/apisix.go +++ b/rollout/trafficrouting/apisix/apisix.go @@ -91,7 +91,7 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 return err } -func (r *Reconciler) processSetWeightRoutes(desiredWeight int32, apisixRoute *unstructured.Unstructured, rollout *v1alpha1.Rollout, apisixRouteName string) ([]interface{}, error) { +func (r *Reconciler) processSetWeightRoutes(desiredWeight int32, apisixRoute *unstructured.Unstructured, rollout *v1alpha1.Rollout, apisixRouteName string) ([]any, error) { httpRoutes, isFound, err := unstructured.NestedSlice(apisixRoute.Object, "spec", "http") if err != nil { return nil, err @@ -129,9 +129,9 @@ func (r *Reconciler) processSetWeightRoutes(desiredWeight int32, apisixRoute *un return httpRoutes, nil } -func GetHttpRoute(routes []interface{}, ref string) (interface{}, error) { +func GetHttpRoute(routes []any, ref string) (any, error) { for _, route := range routes { - typedRoute, ok := route.(map[string]interface{}) + typedRoute, ok := route.(map[string]any) if !ok { return nil, errors.New(failedToTypeAssertion) } @@ -151,8 +151,8 @@ func GetHttpRoute(routes []interface{}, ref string) (interface{}, error) { return nil, errors.New(fmt.Sprintf("Apisix http route rule %s not found", ref)) } -func GetBackends(httpRoute interface{}) ([]interface{}, error) { - typedHttpRoute, ok := httpRoute.(map[string]interface{}) +func GetBackends(httpRoute any) ([]any, error) { + typedHttpRoute, ok := httpRoute.(map[string]any) if !ok { return nil, errors.New(failedToTypeAssertion) } @@ -160,17 +160,17 @@ func GetBackends(httpRoute interface{}) ([]interface{}, error) { if !ok { return nil, errors.New("Apisix http route backends not found") } - backends, ok := rawBackends.([]interface{}) + backends, ok := rawBackends.([]any) 
if !ok { return nil, errors.New(fmt.Sprintf("%s backends", failedToTypeAssertion)) } return backends, nil } -func setBackendWeight(backendName string, backends []interface{}, weight int64) error { +func setBackendWeight(backendName string, backends []any, weight int64) error { found := false for _, backend := range backends { - typedBackend, ok := backend.(map[string]interface{}) + typedBackend, ok := backend.(map[string]any) if !ok { return errors.New(fmt.Sprintf("%s backends", failedToTypeAssertion)) } @@ -323,14 +323,14 @@ func (r *Reconciler) makeSetHeaderRoute(ctx context.Context, headerRouting *v1al return setHeaderApisixRoute, isNew, nil } -func removeBackend(route interface{}, backendName string, backends []interface{}) error { - typedRoute, ok := route.(map[string]interface{}) +func removeBackend(route any, backendName string, backends []any) error { + typedRoute, ok := route.(map[string]any) if !ok { return errors.New("Failed type assertion for Apisix http route") } - result := []interface{}{} + result := []any{} for _, backend := range backends { - typedBackend, ok := backend.(map[string]interface{}) + typedBackend, ok := backend.(map[string]any) if !ok { return errors.New("Failed type assertion for Apisix http route backend") } @@ -348,8 +348,8 @@ func removeBackend(route interface{}, backendName string, backends []interface{} return unstructured.SetNestedSlice(typedRoute, result, "backends") } -func processRulePriority(route interface{}) error { - typedRoute, ok := route.(map[string]interface{}) +func processRulePriority(route any) error { + typedRoute, ok := route.(map[string]any) if !ok { return errors.New("Failed type assertion for Apisix http route") } @@ -366,40 +366,40 @@ func processRulePriority(route interface{}) error { return nil } -func setApisixRuleMatch(route interface{}, headerRouting *v1alpha1.SetHeaderRoute) error { - typedRoute, ok := route.(map[string]interface{}) +func setApisixRuleMatch(route any, headerRouting 
*v1alpha1.SetHeaderRoute) error { + typedRoute, ok := route.(map[string]any) if !ok { return errors.New("Failed type assertion for Apisix http route") } - exprs := []interface{}{} + exprs := []any{} for _, match := range headerRouting.Match { exprs = append(exprs, apisixExprs(match.HeaderName, match.HeaderValue.Exact, match.HeaderValue.Regex, match.HeaderValue.Prefix)...) } return unstructured.SetNestedSlice(typedRoute, exprs, "match", "exprs") } -func apisixExprs(header, exact, regex, prefix string) []interface{} { - subject := map[string]interface{}{ +func apisixExprs(header, exact, regex, prefix string) []any { + subject := map[string]any{ "scope": "Header", "name": header, } - exprs := []interface{}{} + exprs := []any{} if exact != "" { - exprs = append(exprs, map[string]interface{}{ + exprs = append(exprs, map[string]any{ "subject": subject, "op": "Equal", "value": exact, }) } if regex != "" { - exprs = append(exprs, map[string]interface{}{ + exprs = append(exprs, map[string]any{ "subject": subject, "op": "RegexMatch", "value": regex, }) } if prefix != "" { - exprs = append(exprs, map[string]interface{}{ + exprs = append(exprs, map[string]any{ "subject": subject, "op": "RegexMatch", "value": fmt.Sprintf("^%s.*", prefix), diff --git a/rollout/trafficrouting/apisix/apisix_test.go b/rollout/trafficrouting/apisix/apisix_test.go index 9fcf4ff394..3749e64d6d 100644 --- a/rollout/trafficrouting/apisix/apisix_test.go +++ b/rollout/trafficrouting/apisix/apisix_test.go @@ -116,10 +116,6 @@ spec: priority: 2 ` -var ( - client *mocks.FakeClient = &mocks.FakeClient{} -) - const ( stableServiceName string = "stable-rollout" fakeStableServiceName string = "fake-stable-rollout" @@ -135,7 +131,7 @@ func TestUpdateHash(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -152,10 +148,9 @@ func TestSetWeight(t *testing.T) { 
mocks.ErrorApisixRouteObj = toUnstructured(t, errorApisixRoute) t.Run("SetWeight", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -172,7 +167,7 @@ func TestSetWeight(t *testing.T) { backends, err := GetBackends(apisixHttpRouteObj) assert.NoError(t, err) for _, backend := range backends { - typedBackend, ok := backend.(map[string]interface{}) + typedBackend, ok := backend.(map[string]any) assert.Equal(t, ok, true) nameOfCurrentBackend, isFound, err := unstructured.NestedString(typedBackend, "serviceName") assert.NoError(t, err) @@ -195,7 +190,6 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithError", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -212,7 +206,6 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithErrorManifest", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -229,10 +222,9 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithErrorStableName", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(fakeStableServiceName, canaryServiceName, apisixRouteName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -244,10 +236,9 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithErrorCanaryName", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, fakeCanaryServiceName, apisixRouteName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -259,7 +250,6 @@ func TestSetWeight(t *testing.T) { }) t.Run("ApisixUpdateError", func(t *testing.T) { // Given - 
t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -279,7 +269,7 @@ func TestSetWeight(t *testing.T) { func TestGetHttpRouteError(t *testing.T) { type testcase struct { - routes []interface{} + routes []any ref string } testcases := []testcase{ @@ -288,28 +278,28 @@ func TestGetHttpRouteError(t *testing.T) { ref: "nil", }, { - routes: []interface{}{""}, + routes: []any{""}, ref: "Failed type", }, { - routes: []interface{}{ - map[string]interface{}{ + routes: []any{ + map[string]any{ "x": nil, }, }, ref: "noname", }, { - routes: []interface{}{ - map[string]interface{}{ + routes: []any{ + map[string]any{ "name": 123, }, }, ref: "name type error", }, { - routes: []interface{}{ - map[string]interface{}{ + routes: []any{ + map[string]any{ "name": "123", }, }, @@ -324,11 +314,11 @@ func TestGetHttpRouteError(t *testing.T) { } func TestGetBackendsError(t *testing.T) { - testcases := []interface{}{ + testcases := []any{ nil, 123, - map[string]interface{}{}, - map[string]interface{}{ + map[string]any{}, + map[string]any{ "backends": "123", }, } @@ -342,26 +332,26 @@ func TestGetBackendsError(t *testing.T) { func TestSetBackendWeightError(t *testing.T) { type testcase struct { backendName string - backends []interface{} + backends []any weight int64 } testcases := []testcase{ {}, { - backends: []interface{}{ + backends: []any{ "", }, }, { - backends: []interface{}{ - map[string]interface{}{ + backends: []any{ + map[string]any{ "abc": 123, }, }, }, { - backends: []interface{}{ - map[string]interface{}{ + backends: []any{ + map[string]any{ "serviceName": 123, }, }, @@ -380,7 +370,6 @@ func TestSetHeaderRoute(t *testing.T) { mocks.DuplicateSetHeaderApisixRouteObj = toUnstructured(t, apisixSetHeaderDuplicateRoute) mocks.ErrorApisixRouteObj = toUnstructured(t, errorApisixRoute) t.Run("SetHeaderGetRouteError", func(t *testing.T) { - t.Parallel() cfg := ReconcilerConfig{ Rollout: 
newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -397,7 +386,6 @@ func TestSetHeaderRoute(t *testing.T) { assert.Error(t, err) }) t.Run("SetHeaderGetManagedRouteError", func(t *testing.T) { - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -420,7 +408,6 @@ func TestSetHeaderRoute(t *testing.T) { assert.Error(t, err) }) t.Run("SetHeaderDuplicateManagedRouteError", func(t *testing.T) { - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -445,7 +432,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderRouteNilMatchWithNew", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: &mocks.FakeClient{ @@ -466,7 +452,6 @@ func TestSetHeaderRoute(t *testing.T) { t.Run("SetHeaderRouteNilMatch", func(t *testing.T) { client := &mocks.FakeClient{} // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: client, @@ -485,7 +470,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderRoutePriorityWithNew", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsGetNotFoundError: true, } @@ -512,7 +496,7 @@ func TestSetHeaderRoute(t *testing.T) { assert.NoError(t, err) assert.Equal(t, true, ok) - rule, ok := rules[0].(map[string]interface{}) + rule, ok := rules[0].(map[string]any) assert.Equal(t, true, ok) priority, ok, err := unstructured.NestedInt64(rule, "priority") assert.NoError(t, err) @@ -521,7 +505,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderRoutePriorityWithNew", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsGetNotFoundError: false, } @@ -548,7 +531,7 @@ func TestSetHeaderRoute(t 
*testing.T) { assert.NoError(t, err) assert.Equal(t, true, ok) - rule, ok := rules[0].(map[string]interface{}) + rule, ok := rules[0].(map[string]any) assert.Equal(t, true, ok) priority, ok, err := unstructured.NestedInt64(rule, "priority") assert.NoError(t, err) @@ -558,7 +541,6 @@ func TestSetHeaderRoute(t *testing.T) { t.Run("SetHeaderRouteExprsWithNew", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsGetNotFoundError: true, } @@ -597,7 +579,7 @@ func TestSetHeaderRoute(t *testing.T) { assert.NoError(t, err) assert.Equal(t, true, ok) - rule, ok := rules[0].(map[string]interface{}) + rule, ok := rules[0].(map[string]any) assert.Equal(t, true, ok) exprs, ok, err := unstructured.NestedSlice(rule, "match", "exprs") assert.NoError(t, err) @@ -613,7 +595,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderRouteExprs", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsGetNotFoundError: false, } @@ -652,7 +633,7 @@ func TestSetHeaderRoute(t *testing.T) { assert.NoError(t, err) assert.Equal(t, true, ok) - rule, ok := rules[0].(map[string]interface{}) + rule, ok := rules[0].(map[string]any) assert.Equal(t, true, ok) exprs, ok, err := unstructured.NestedSlice(rule, "match", "exprs") assert.NoError(t, err) @@ -668,7 +649,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderDeleteError", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsDeleteError: true, } @@ -686,7 +666,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderCreateError", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsCreateError: true, IsGetNotFoundError: true, @@ -710,7 +689,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("SetHeaderUpdateError", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ UpdateError: true, IsGetNotFoundError: false, @@ -734,7 +712,6 @@ func TestSetHeaderRoute(t *testing.T) { }) 
t.Run("RemoveManagedRoutesDeleteError", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsDeleteError: true, } @@ -749,7 +726,6 @@ func TestSetHeaderRoute(t *testing.T) { }) t.Run("RemoveManagedRoutesNilManagedRoutes", func(t *testing.T) { // Given - t.Parallel() client := &mocks.FakeClient{ IsDeleteError: true, } @@ -765,11 +741,11 @@ func TestSetHeaderRoute(t *testing.T) { }) } -func assertExpr(t *testing.T, expr interface{}, op, name, scope, value string) { +func assertExpr(t *testing.T, expr any, op, name, scope, value string) { if expr == nil { assert.Error(t, errors.New("expr is nil")) } - typedExpr, ok := expr.(map[string]interface{}) + typedExpr, ok := expr.(map[string]any) assert.Equal(t, true, ok) opAct, ok, err := unstructured.NestedString(typedExpr, "op") @@ -799,7 +775,7 @@ func TestSetMirrorRoute(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -826,7 +802,6 @@ func TestRemoveManagedRoutes(t *testing.T) { t.Run("RemoveManagedRoutes", func(t *testing.T) { client := &mocks.FakeClient{} // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: client, @@ -842,7 +817,6 @@ func TestRemoveManagedRoutes(t *testing.T) { IsGetManagedRouteError: true, } // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: client, @@ -858,7 +832,6 @@ func TestRemoveManagedRoutes(t *testing.T) { IsGetNotFoundError: true, } // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), Client: client, @@ -889,7 +862,7 @@ func TestVerifyWeight(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), - Client: 
client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -906,10 +879,9 @@ func TestType(t *testing.T) { mocks.ApisixRouteObj = toUnstructured(t, apisixRoute) t.Run("Type", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, apisixRouteName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) diff --git a/rollout/trafficrouting/apisix/mocks/apisix.go b/rollout/trafficrouting/apisix/mocks/apisix.go index c884167412..2bba83c499 100644 --- a/rollout/trafficrouting/apisix/mocks/apisix.go +++ b/rollout/trafficrouting/apisix/mocks/apisix.go @@ -45,10 +45,10 @@ var ( ErrorApisixRouteObj *unstructured.Unstructured ) -func (f *FakeRecorder) Eventf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) Eventf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...any) { } -func (f *FakeRecorder) Warnf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) Warnf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...any) { } func (f *FakeRecorder) K8sRecorder() record.EventRecorder { diff --git a/rollout/trafficrouting/appmesh/appmesh.go b/rollout/trafficrouting/appmesh/appmesh.go index f0528f779a..6272571111 100644 --- a/rollout/trafficrouting/appmesh/appmesh.go +++ b/rollout/trafficrouting/appmesh/appmesh.go @@ -141,7 +141,7 @@ func (r *Reconciler) SetHeaderRoute(headerRouting *v1alpha1.SetHeaderRoute) erro } type routeReconcileContext struct { - route map[string]interface{} + route map[string]any routeIndex int routeFldPath *field.Path rCanaryVnodeRef *v1alpha1.AppMeshVirtualNodeReference @@ -170,7 +170,7 @@ func (r *Reconciler) reconcileVirtualRouter(ctx context.Context, rRoutes []strin for idx, routeI := range routesI { routeFldPath := routesFldPath.Index(idx) - route, ok := 
routeI.(map[string]interface{}) + route, ok := routeI.(map[string]any) if !ok { return field.Invalid(routeFldPath, uVrCopy.GetName(), ErrNotWellFormed) } @@ -232,12 +232,12 @@ func (r *Reconciler) reconcileRoute(ctx context.Context, uVr *unstructured.Unstr requiresUpdate := false for idx, wtI := range weightedTargets { wtFldPath := weightedTargetsFldPath.Index(idx) - wt, ok := wtI.(map[string]interface{}) + wt, ok := wtI.(map[string]any) if !ok { return false, field.Invalid(wtFldPath, uVr.GetName(), ErrNotWellFormed) } wtVnRefFldPath := wtFldPath.Child("virtualNodeRef") - wtVnRef, ok := wt["virtualNodeRef"].(map[string]interface{}) + wtVnRef, ok := wt["virtualNodeRef"].(map[string]any) if !ok { return false, field.Invalid(wtVnRefFldPath, uVr.GetName(), ErrNotWellFormed) } @@ -324,22 +324,22 @@ func (r *Reconciler) Type() string { return Type } -func getPodSelectorMatchLabels(vnode *unstructured.Unstructured) (map[string]interface{}, error) { +func getPodSelectorMatchLabels(vnode *unstructured.Unstructured) (map[string]any, error) { m, found, err := unstructured.NestedMap(vnode.Object, "spec", "podSelector", "matchLabels") if err != nil { return nil, err } if !found || m == nil { - return make(map[string]interface{}), nil + return make(map[string]any), nil } return m, nil } -func setPodSelectorMatchLabels(vnode *unstructured.Unstructured, ml map[string]interface{}) error { +func setPodSelectorMatchLabels(vnode *unstructured.Unstructured, ml map[string]any) error { return unstructured.SetNestedMap(vnode.Object, ml, "spec", "podSelector", "matchLabels") } -func toInt64(obj interface{}) (int64, error) { +func toInt64(obj any) (int64, error) { switch i := obj.(type) { case float64: return int64(i), nil @@ -370,8 +370,8 @@ func toInt64(obj interface{}) (int64, error) { } } -func GetRouteRule(route map[string]interface{}) (map[string]interface{}, string, error) { - var routeRule map[string]interface{} +func GetRouteRule(route map[string]any) (map[string]any, string, 
error) { + var routeRule map[string]any var routeType string for _, rType := range supportedRouteTypes { r, found, err := unstructured.NestedMap(route, rType) diff --git a/rollout/trafficrouting/appmesh/appmesh_test.go b/rollout/trafficrouting/appmesh/appmesh_test.go index 5f7ed41843..e110f8227b 100644 --- a/rollout/trafficrouting/appmesh/appmesh_test.go +++ b/rollout/trafficrouting/appmesh/appmesh_test.go @@ -218,7 +218,7 @@ func TestSetWeightWithUpdateVirtualRouterError(t *testing.T) { func TestSetWeightWithInvalidRoutes(t *testing.T) { type args struct { - routes []interface{} + routes []any fieldPathWithError string } @@ -236,7 +236,7 @@ func TestSetWeightWithInvalidRoutes(t *testing.T) { { name: "route with malformed content", args: args{ - routes: []interface{}{ + routes: []any{ "malformed-content", }, fieldPathWithError: field.NewPath("spec", "routes").Index(0).String(), @@ -245,9 +245,9 @@ func TestSetWeightWithInvalidRoutes(t *testing.T) { { name: "route with no name", args: args{ - routes: []interface{}{ - map[string]interface{}{ - "httpRoute": map[string]interface{}{}, + routes: []any{ + map[string]any{ + "httpRoute": map[string]any{}, }, }, fieldPathWithError: field.NewPath("spec", "routes").Index(0).Child("name").String(), @@ -256,10 +256,10 @@ func TestSetWeightWithInvalidRoutes(t *testing.T) { { name: "route with bad route-type", args: args{ - routes: []interface{}{ - map[string]interface{}{ + routes: []any{ + map[string]any{ "name": "primary", - "badRoute": map[string]interface{}{}, + "badRoute": map[string]any{}, }, }, fieldPathWithError: field.NewPath("spec", "routes").Index(0).String(), @@ -268,10 +268,10 @@ func TestSetWeightWithInvalidRoutes(t *testing.T) { { name: "route with no targets", args: args{ - routes: []interface{}{ - map[string]interface{}{ + routes: []any{ + map[string]any{ "name": "primary", - "httpRoute": map[string]interface{}{}, + "httpRoute": map[string]any{}, }, }, fieldPathWithError: field.NewPath("spec", 
"routes").Index(0).Child("httpRoute").Child("action").Child("weightedTargets").String(), @@ -654,9 +654,9 @@ func TestUpdateHashWhenUpdateCanaryVirtualNodeFails(t *testing.T) { func TestUpdateHashWithVirtualNodeMissingMatchLabels(t *testing.T) { canaryVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineCanaryVnode) - unstructured.SetNestedMap(canaryVnode.Object, make(map[string]interface{}), "spec", "podSelector") + unstructured.SetNestedMap(canaryVnode.Object, make(map[string]any), "spec", "podSelector") stableVnode := unstructuredutil.StrToUnstructuredUnsafe(baselineStableVnode) - unstructured.SetNestedMap(stableVnode.Object, make(map[string]interface{}), "spec", "podSelector") + unstructured.SetNestedMap(stableVnode.Object, make(map[string]any), "spec", "podSelector") client := testutil.NewFakeDynamicClient(canaryVnode, stableVnode) cfg := ReconcilerConfig{ Rollout: fakeRollout(), @@ -704,13 +704,13 @@ func assertSetWeightAction(t *testing.T, action k8stesting.Action, desiredWeight routesI, _, err := unstructured.NestedSlice(uVr, "spec", "routes") assert.Nil(t, err) for _, routeI := range routesI { - route, _ := routeI.(map[string]interface{}) + route, _ := routeI.(map[string]any) weightedTargetsI, found, err := unstructured.NestedSlice(route, routeType, "action", "weightedTargets") assert.Nil(t, err) assert.True(t, found, "Did not find weightedTargets in route") assert.Len(t, weightedTargetsI, 2) for _, wtI := range weightedTargetsI { - wt, _ := wtI.(map[string]interface{}) + wt, _ := wtI.(map[string]any) vnodeName, _, err := unstructured.NestedString(wt, "virtualNodeRef", "name") assert.Nil(t, err) weight, err := toInt64(wt["weight"]) diff --git a/rollout/trafficrouting/appmesh/resource_client.go b/rollout/trafficrouting/appmesh/resource_client.go index 4171b9a385..4dbc4bd8cc 100644 --- a/rollout/trafficrouting/appmesh/resource_client.go +++ b/rollout/trafficrouting/appmesh/resource_client.go @@ -61,7 +61,7 @@ func (rc *ResourceClient) 
GetVirtualRouterCRForVirtualService(ctx context.Contex return rc.GetVirtualRouterCR(ctx, namespace, name) } -func defaultIfEmpty(strI interface{}, defaultStr string) string { +func defaultIfEmpty(strI any, defaultStr string) string { if strI == nil { return defaultStr } else { diff --git a/rollout/trafficrouting/istio/controller.go b/rollout/trafficrouting/istio/controller.go index 847ae65956..c85c4bc118 100644 --- a/rollout/trafficrouting/istio/controller.go +++ b/rollout/trafficrouting/istio/controller.go @@ -43,7 +43,7 @@ const ( type IstioControllerConfig struct { ArgoprojClientSet roclientset.Interface DynamicClientSet dynamic.Interface - EnqueueRollout func(ro interface{}) + EnqueueRollout func(ro any) RolloutsInformer informers.RolloutInformer VirtualServiceInformer cache.SharedIndexInformer DestinationRuleInformer cache.SharedIndexInformer @@ -67,7 +67,7 @@ func NewIstioController(cfg IstioControllerConfig) *IstioController { // Add a Rollout index against referenced VirtualServices and DestinationRules util.CheckErr(cfg.RolloutsInformer.Informer().AddIndexers(cache.Indexers{ - virtualServiceIndexName: func(obj interface{}) (strings []string, e error) { + virtualServiceIndexName: func(obj any) (strings []string, e error) { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil { return istioutil.GetRolloutVirtualServiceKeys(ro), nil } @@ -75,7 +75,7 @@ func NewIstioController(cfg IstioControllerConfig) *IstioController { }, })) util.CheckErr(cfg.RolloutsInformer.Informer().AddIndexers(cache.Indexers{ - destinationRuleIndexName: func(obj interface{}) (strings []string, e error) { + destinationRuleIndexName: func(obj any) (strings []string, e error) { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil { return istioutil.GetRolloutDesinationRuleKeys(ro), nil } @@ -85,27 +85,27 @@ func NewIstioController(cfg IstioControllerConfig) *IstioController { // When a VirtualService changes, simply enqueue the referencing rollout 
c.VirtualServiceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { c.EnqueueRolloutFromIstioVirtualService(obj) }, // TODO: DeepEquals on httpRoutes - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { c.EnqueueRolloutFromIstioVirtualService(new) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { c.EnqueueRolloutFromIstioVirtualService(obj) }, }) // When a DestinationRule changes, enqueue the DestinationRule for processing c.DestinationRuleInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { c.EnqueueDestinationRule(obj) }, - UpdateFunc: func(old, new interface{}) { + UpdateFunc: func(old, new any) { c.EnqueueDestinationRule(new) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { c.EnqueueDestinationRule(obj) }, }) @@ -158,13 +158,13 @@ func (c *IstioController) Run(ctx context.Context) { // EnqueueDestinationRule examines a VirtualService, finds the Rollout referencing // that VirtualService, and enqueues the corresponding Rollout for reconciliation -func (c *IstioController) EnqueueDestinationRule(obj interface{}) { +func (c *IstioController) EnqueueDestinationRule(obj any) { controllerutil.EnqueueRateLimited(obj, c.destinationRuleWorkqueue) } // EnqueueRolloutFromIstioVirtualService examines a VirtualService, finds the Rollout referencing // that VirtualService, and enqueues the corresponding Rollout for reconciliation -func (c *IstioController) EnqueueRolloutFromIstioVirtualService(vsvc interface{}) { +func (c *IstioController) EnqueueRolloutFromIstioVirtualService(vsvc any) { acc, err := meta.Accessor(vsvc) if err != nil { log.Errorf("Error processing istio VirtualService from watch: %v: %v", err, vsvc) diff --git a/rollout/trafficrouting/istio/controller_test.go b/rollout/trafficrouting/istio/controller_test.go index af37f11053..97d85cbaff 100644 --- 
a/rollout/trafficrouting/istio/controller_test.go +++ b/rollout/trafficrouting/istio/controller_test.go @@ -45,7 +45,7 @@ func NewFakeIstioController(objs ...runtime.Object) *IstioController { c := NewIstioController(IstioControllerConfig{ ArgoprojClientSet: rolloutClient, DynamicClientSet: dynamicClientSet, - EnqueueRollout: func(ro interface{}) {}, + EnqueueRollout: func(ro any) {}, RolloutsInformer: rolloutInformerFactory.Argoproj().V1alpha1().Rollouts(), VirtualServiceInformer: virtualServiceInformer, DestinationRuleInformer: destinationRuleInformer, @@ -178,7 +178,7 @@ spec: key, err := cache.MetaNamespaceKeyFunc(destRule) assert.NoError(t, err) enqueueCalled := false - c.EnqueueRollout = func(obj interface{}) { + c.EnqueueRollout = func(obj any) { enqueueCalled = true } @@ -199,7 +199,7 @@ spec: key, err := cache.MetaNamespaceKeyFunc(destRule) assert.NoError(t, err) enqueueCalled := false - c.EnqueueRollout = func(obj interface{}) { + c.EnqueueRollout = func(obj any) { enqueueCalled = true } @@ -219,7 +219,7 @@ spec: key, err := cache.MetaNamespaceKeyFunc(destRule) assert.NoError(t, err) enqueueCalled := false - c.EnqueueRollout = func(obj interface{}) { + c.EnqueueRollout = func(obj any) { enqueueCalled = true } diff --git a/rollout/trafficrouting/istio/istio.go b/rollout/trafficrouting/istio/istio.go index 70b0cc0e68..bb9a15eb5e 100644 --- a/rollout/trafficrouting/istio/istio.go +++ b/rollout/trafficrouting/istio/istio.go @@ -6,6 +6,8 @@ import ( "fmt" "strings" + "github.com/argoproj/argo-rollouts/rollout/trafficrouting" + jsonpatch "github.com/evanphx/json-patch/v5" "github.com/mitchellh/mapstructure" log "github.com/sirupsen/logrus" @@ -74,26 +76,26 @@ const ( invalidCasting = "Invalid casting: field '%s' is not of type '%s'" ) -func (patches virtualServicePatches) patchVirtualService(httpRoutes []interface{}, tlsRoutes []interface{}, tcpRoutes []interface{}) error { +func (patches virtualServicePatches) patchVirtualService(httpRoutes []any, tlsRoutes 
[]any, tcpRoutes []any) error { for _, patch := range patches { - var route map[string]interface{} + var route map[string]any err := false if patch.routeType == Http { - route, err = httpRoutes[patch.routeIndex].(map[string]interface{}) + route, err = httpRoutes[patch.routeIndex].(map[string]any) } else if patch.routeType == Tls { - route, err = tlsRoutes[patch.routeIndex].(map[string]interface{}) + route, err = tlsRoutes[patch.routeIndex].(map[string]any) } else if patch.routeType == Tcp { - route, err = tcpRoutes[patch.routeIndex].(map[string]interface{}) + route, err = tcpRoutes[patch.routeIndex].(map[string]any) } if !err { return fmt.Errorf(invalidCasting, patch.routeType+"[]", "map[string]interface") } - destinations, ok := route["route"].([]interface{}) + destinations, ok := route["route"].([]any) if !ok { return fmt.Errorf(invalidCasting, patch.routeType+"[].route", "[]interface") } if patch.destinationIndex < len(destinations) { - destination, ok := destinations[patch.destinationIndex].(map[string]interface{}) + destination, ok := destinations[patch.destinationIndex].(map[string]any) if !ok { return fmt.Errorf(invalidCasting, patch.routeType+"[].route[].destination", "map[string]interface") } @@ -105,9 +107,9 @@ func (patches virtualServicePatches) patchVirtualService(httpRoutes []interface{ } route["route"] = destinations } else { - destination := make(map[string]interface{}, 0) + destination := make(map[string]any, 0) destination["weight"] = float64(patch.weight) - destination["destination"] = map[string]interface{}{"host": patch.host} + destination["destination"] = map[string]any{"host": patch.host} destinations = append(destinations, destination) route["route"] = destinations } @@ -123,8 +125,7 @@ func (patches virtualServicePatches) patchVirtualService(httpRoutes []interface{ } func (r *Reconciler) generateVirtualServicePatches(rolloutVsvcRouteNames []string, httpRoutes []VirtualServiceHTTPRoute, rolloutVsvcTLSRoutes []v1alpha1.TLSRoute, tlsRoutes 
[]VirtualServiceTLSRoute, rolloutVsvcTCPRoutes []v1alpha1.TCPRoute, tcpRoutes []VirtualServiceTCPRoute, desiredWeight int64, additionalDestinations ...v1alpha1.WeightDestination) virtualServicePatches { - canarySvc := r.rollout.Spec.Strategy.Canary.CanaryService - stableSvc := r.rollout.Spec.Strategy.Canary.StableService + stableSvc, canarySvc := trafficrouting.GetStableAndCanaryServices(r.rollout, false) canarySubset := "" stableSubset := "" if r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.DestinationRule != nil { @@ -388,12 +389,12 @@ func (r *Reconciler) UpdateHash(canaryHash, stableHash string, additionalDestina // destinationRuleReplaceExtraMarshal relace the key of "Extra" with the actual content // e.g., "trafficpolicy" and return the bytes of the new object func destinationRuleReplaceExtraMarshal(dRule *DestinationRule) []byte { - dRuleNew := map[string]interface{}{} + dRuleNew := map[string]any{} dRuleNew["metadata"] = dRule.ObjectMeta.DeepCopy() - subsets := []map[string]interface{}{} + subsets := []map[string]any{} for _, subset := range dRule.Spec.Subsets { - newsubset := map[string]interface{}{} + newsubset := map[string]any{} newsubset["name"] = subset.Name newsubset["labels"] = subset.Labels @@ -402,7 +403,7 @@ func destinationRuleReplaceExtraMarshal(dRule *DestinationRule) []byte { continue } - extra := map[string]interface{}{} + extra := map[string]any{} inputbyte, _ := json.Marshal(subset.Extra) json.Unmarshal(inputbyte, &extra) @@ -412,7 +413,7 @@ func destinationRuleReplaceExtraMarshal(dRule *DestinationRule) []byte { } subsets = append(subsets, newsubset) } - dRuleNew["spec"] = map[string]interface{}{ + dRuleNew["spec"] = map[string]any{ "subsets": subsets, "host": dRule.Spec.Host, } @@ -474,7 +475,7 @@ func unstructuredToDestinationRules(un *unstructured.Unstructured) ([]byte, *Des func unMarshalSubsets(dRule *DestinationRule, dRuleBytes []byte) error { var err error - unstructured := map[string]interface{}{} + unstructured := 
map[string]any{} var extractFieldBytes func([]byte, string) ([]byte, error) extractFieldBytes = func(input []byte, name string) ([]byte, error) { err = json.Unmarshal(input, &unstructured) @@ -498,7 +499,7 @@ func unMarshalSubsets(dRule *DestinationRule, dRuleBytes []byte) error { return err } - subsetsMap := []map[string]interface{}{} + subsetsMap := []map[string]any{} err = json.Unmarshal(subsetsBytes, &subsetsMap) if err != nil { return err @@ -523,9 +524,9 @@ func unMarshalSubsets(dRule *DestinationRule, dRuleBytes []byte) error { return nil } -func UnmarshalJson(input []byte, result interface{}) (map[string]interface{}, error) { +func UnmarshalJson(input []byte, result any) (map[string]any, error) { // unmarshal json to a map - foomap := make(map[string]interface{}) + foomap := make(map[string]any) json.Unmarshal(input, &foomap) // create a mapstructure decoder @@ -545,7 +546,7 @@ func UnmarshalJson(input []byte, result interface{}) (map[string]interface{}, er } // copy and return unused fields - unused := map[string]interface{}{} + unused := map[string]any{} for _, k := range md.Unused { unused[k] = foomap[k] } @@ -565,7 +566,7 @@ func jsonBytesToDestinationRule(dRuleBytes []byte) (*DestinationRule, error) { return &dRule, nil } -func GetHttpRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { +func GetHttpRoutesI(obj *unstructured.Unstructured) ([]any, error) { httpRoutesI, notFound, err := unstructured.NestedSlice(obj.Object, "spec", Http) if !notFound { return nil, fmt.Errorf(SpecHttpNotFound) @@ -576,7 +577,7 @@ func GetHttpRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { return httpRoutesI, nil } -func GetTlsRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { +func GetTlsRoutesI(obj *unstructured.Unstructured) ([]any, error) { tlsRoutesI, notFound, err := unstructured.NestedSlice(obj.Object, "spec", Tls) if !notFound { return nil, fmt.Errorf(SpecHttpNotFound) @@ -587,7 +588,7 @@ func GetTlsRoutesI(obj 
*unstructured.Unstructured) ([]interface{}, error) { return tlsRoutesI, nil } -func GetTcpRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { +func GetTcpRoutesI(obj *unstructured.Unstructured) ([]any, error) { tcpRoutesI, notFound, err := unstructured.NestedSlice(obj.Object, "spec", Tcp) if !notFound { return nil, fmt.Errorf(".spec.tcp is not defined") @@ -598,7 +599,7 @@ func GetTcpRoutesI(obj *unstructured.Unstructured) ([]interface{}, error) { return tcpRoutesI, nil } -func GetHttpRoutes(httpRoutesI []interface{}) ([]VirtualServiceHTTPRoute, error) { +func GetHttpRoutes(httpRoutesI []any) ([]VirtualServiceHTTPRoute, error) { routeBytes, err := json.Marshal(httpRoutesI) if err != nil { return nil, err @@ -613,7 +614,7 @@ func GetHttpRoutes(httpRoutesI []interface{}) ([]VirtualServiceHTTPRoute, error) return httpRoutes, nil } -func GetTlsRoutes(obj *unstructured.Unstructured, tlsRoutesI []interface{}) ([]VirtualServiceTLSRoute, error) { +func GetTlsRoutes(obj *unstructured.Unstructured, tlsRoutesI []any) ([]VirtualServiceTLSRoute, error) { routeBytes, err := json.Marshal(tlsRoutesI) if err != nil { return nil, err @@ -628,7 +629,7 @@ func GetTlsRoutes(obj *unstructured.Unstructured, tlsRoutesI []interface{}) ([]V return tlsRoutes, nil } -func GetTcpRoutes(obj *unstructured.Unstructured, tcpRoutesI []interface{}) ([]VirtualServiceTCPRoute, error) { +func GetTcpRoutes(obj *unstructured.Unstructured, tcpRoutesI []any) ([]VirtualServiceTCPRoute, error) { routeBytes, err := json.Marshal(tcpRoutesI) if err != nil { return nil, err @@ -717,7 +718,7 @@ func (r *Reconciler) reconcileVirtualServiceHeaderRoutes(virtualService v1alpha1 return err } - canarySvc := r.rollout.Spec.Strategy.Canary.CanaryService + _, canarySvc := trafficrouting.GetStableAndCanaryServices(r.rollout, false) if destRuleHost != "" { canarySvc = destRuleHost } @@ -825,8 +826,8 @@ func (r *Reconciler) getDestinationRule(dRuleSpec *v1alpha1.IstioDestinationRule return origBytes, dRule, 
dRuleNew, nil } -func createHeaderRoute(virtualService v1alpha1.IstioVirtualService, unVsvc *unstructured.Unstructured, headerRouting *v1alpha1.SetHeaderRoute, host string, subset string) map[string]interface{} { - var routeMatches []interface{} +func createHeaderRoute(virtualService v1alpha1.IstioVirtualService, unVsvc *unstructured.Unstructured, headerRouting *v1alpha1.SetHeaderRoute, host string, subset string) map[string]any { + var routeMatches []any for _, hrm := range headerRouting.Match { routeMatches = append(routeMatches, createHeaderRouteMatch(hrm)) } @@ -838,41 +839,41 @@ func createHeaderRoute(virtualService v1alpha1.IstioVirtualService, unVsvc *unst canaryDestination := routeDestination(host, port.Number, subset, 100) - return map[string]interface{}{ + return map[string]any{ "name": headerRouting.Name, "match": routeMatches, - "route": []interface{}{canaryDestination}, + "route": []any{canaryDestination}, } } -func createHeaderRouteMatch(hrm v1alpha1.HeaderRoutingMatch) interface{} { - res := map[string]interface{}{} +func createHeaderRouteMatch(hrm v1alpha1.HeaderRoutingMatch) any { + res := map[string]any{} value := hrm.HeaderValue setMapValueIfNotEmpty(res, "exact", value.Exact) setMapValueIfNotEmpty(res, "regex", value.Regex) setMapValueIfNotEmpty(res, "prefix", value.Prefix) - return map[string]interface{}{ - "headers": map[string]interface{}{hrm.HeaderName: res}, + return map[string]any{ + "headers": map[string]any{hrm.HeaderName: res}, } } -func setMapValueIfNotEmpty(m map[string]interface{}, key string, value string) { +func setMapValueIfNotEmpty(m map[string]any, key string, value string) { if value != "" { m[key] = value } } -func routeDestination(host string, port uint32, subset string, weight int64) map[string]interface{} { - dest := map[string]interface{}{ +func routeDestination(host string, port uint32, subset string, weight int64) map[string]any { + dest := map[string]any{ "host": host, } if port > 0 { - dest["port"] = 
map[string]interface{}{"number": int64(port)} + dest["port"] = map[string]any{"number": int64(port)} } if subset != "" { dest["subset"] = subset } - routeValue := map[string]interface{}{ + routeValue := map[string]any{ "weight": float64(weight), "destination": dest, } @@ -1022,8 +1023,7 @@ func searchTcpRoute(tcpRoute v1alpha1.TCPRoute, istioTcpRoutes []VirtualServiceT // ValidateHTTPRoutes ensures that all the routes in the rollout exist func ValidateHTTPRoutes(r *v1alpha1.Rollout, routeNames []string, httpRoutes []VirtualServiceHTTPRoute) error { - stableSvc := r.Spec.Strategy.Canary.StableService - canarySvc := r.Spec.Strategy.Canary.CanaryService + stableSvc, canarySvc := trafficrouting.GetStableAndCanaryServices(r, false) routeIndexesToPatch, err := getHttpRouteIndexesToPatch(routeNames, httpRoutes) if err != nil { @@ -1041,10 +1041,10 @@ func ValidateHTTPRoutes(r *v1alpha1.Rollout, routeNames []string, httpRoutes []V if err != nil { return fmt.Errorf("[ValidateHTTPRoutes] failed to marshal http routes: %w", err) } - var httpRoutesI []interface{} + var httpRoutesI []any err = json.Unmarshal(httpRoutesBytes, &httpRoutesI) if err != nil { - return fmt.Errorf("[ValidateHTTPRoutes] failed to marshal http routes to []interface{}: %w", err) + return fmt.Errorf("[ValidateHTTPRoutes] failed to marshal http routes to []any: %w", err) } _, httpRoutesNotWithinManagedRoutes, err := splitManagedRoutesAndNonManagedRoutes(r.Spec.Strategy.Canary.TrafficRouting.ManagedRoutes, httpRoutesI) @@ -1060,8 +1060,7 @@ func ValidateHTTPRoutes(r *v1alpha1.Rollout, routeNames []string, httpRoutes []V // ValidateTlsRoutes ensures that all the routes in the rollout exist and they only have two destinations func ValidateTlsRoutes(r *v1alpha1.Rollout, vsvcTLSRoutes []v1alpha1.TLSRoute, tlsRoutes []VirtualServiceTLSRoute) error { - stableSvc := r.Spec.Strategy.Canary.StableService - canarySvc := r.Spec.Strategy.Canary.CanaryService + stableSvc, canarySvc := 
trafficrouting.GetStableAndCanaryServices(r, false) routeIndexesToPatch, err := getTlsRouteIndexesToPatch(vsvcTLSRoutes, tlsRoutes) if err != nil { @@ -1082,8 +1081,7 @@ func ValidateTlsRoutes(r *v1alpha1.Rollout, vsvcTLSRoutes []v1alpha1.TLSRoute, t // ValidateTcpRoutes ensures that all the routes in the rollout exist and they only have two destinations func ValidateTcpRoutes(r *v1alpha1.Rollout, vsvcTCPRoutes []v1alpha1.TCPRoute, tcpRoutes []VirtualServiceTCPRoute) error { - stableSvc := r.Spec.Strategy.Canary.StableService - canarySvc := r.Spec.Strategy.Canary.CanaryService + stableSvc, canarySvc := trafficrouting.GetStableAndCanaryServices(r, false) routeIndexesToPatch, err := getTcpRouteIndexesToPatch(vsvcTCPRoutes, tcpRoutes) if err != nil { @@ -1191,7 +1189,7 @@ func (r *Reconciler) reconcileVirtualServiceMirrorRoutes(virtualService v1alpha1 if err != nil { return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to get destination rule host: %w", err) } - canarySvc := r.rollout.Spec.Strategy.Canary.CanaryService + _, canarySvc := trafficrouting.GetStableAndCanaryServices(r.rollout, false) if destRuleHost != "" { canarySvc = destRuleHost } @@ -1234,7 +1232,7 @@ func (r *Reconciler) reconcileVirtualServiceMirrorRoutes(virtualService v1alpha1 if !found { return fmt.Errorf(SpecHttpNotFound) } - vsRoutes = append([]interface{}{mR}, vsRoutes...) + vsRoutes = append([]any{mR}, vsRoutes...) 
if err := unstructured.SetNestedSlice(istioVirtualService.Object, vsRoutes, "spec", Http); err != nil { return fmt.Errorf("[reconcileVirtualServiceMirrorRoutes] failed to update virtual service routes via set nested slice: %w", err) } @@ -1243,8 +1241,8 @@ func (r *Reconciler) reconcileVirtualServiceMirrorRoutes(virtualService v1alpha1 } // getVirtualServiceHttpRoutes This returns all the http routes from an istio virtual service as both a rollouts wrapped type -// []VirtualServiceHTTPRoute and a []interface{} of VirtualServiceHTTPRoute -func getVirtualServiceHttpRoutes(obj *unstructured.Unstructured) ([]VirtualServiceHTTPRoute, []interface{}, error) { +// []VirtualServiceHTTPRoute and a []any of VirtualServiceHTTPRoute +func getVirtualServiceHttpRoutes(obj *unstructured.Unstructured) ([]VirtualServiceHTTPRoute, []any, error) { httpRoutesI, err := GetHttpRoutesI(obj) if err != nil { return nil, nil, fmt.Errorf("[getVirtualServiceHttpRoutes] failed to get http route interfaces: %w", err) @@ -1256,9 +1254,9 @@ func getVirtualServiceHttpRoutes(obj *unstructured.Unstructured) ([]VirtualServi return routes, httpRoutesI, nil } -// createMirrorRoute This returns a map[string]interface{} of an istio virtual service mirror route configuration using the last +// createMirrorRoute This returns a map[string]any of an istio virtual service mirror route configuration using the last // set weight as values for the non-matching destinations and canary service for the matching destination. 
-func createMirrorRoute(virtualService v1alpha1.IstioVirtualService, httpRoutes []VirtualServiceHTTPRoute, mirrorRouting *v1alpha1.SetMirrorRoute, canarySvc string, subset string) (map[string]interface{}, error) { +func createMirrorRoute(virtualService v1alpha1.IstioVirtualService, httpRoutes []VirtualServiceHTTPRoute, mirrorRouting *v1alpha1.SetMirrorRoute, canarySvc string, subset string) (map[string]any, error) { var percent int32 if mirrorRouting.Percentage == nil { percent = 100 @@ -1289,12 +1287,12 @@ func createMirrorRoute(virtualService v1alpha1.IstioVirtualService, httpRoutes [ mirrorDestinations.Port = &Port{Number: route[0].Destination.Port.Number} } - mirrorRoute := map[string]interface{}{ + mirrorRoute := map[string]any{ "name": mirrorRouting.Name, "match": istioMatch, "route": route, "mirror": mirrorDestinations, - "mirrorPercentage": map[string]interface{}{"value": float64(percent)}, + "mirrorPercentage": map[string]any{"value": float64(percent)}, } mirrorRouteBytes, err := json.Marshal(mirrorRoute) @@ -1302,7 +1300,7 @@ func createMirrorRoute(virtualService v1alpha1.IstioVirtualService, httpRoutes [ return nil, fmt.Errorf("[createMirrorRoute] failed to marshal mirror route: %w", err) } - var mirrorRouteI map[string]interface{} + var mirrorRouteI map[string]any err = json.Unmarshal(mirrorRouteBytes, &mirrorRouteI) if err != nil { return nil, fmt.Errorf("[createMirrorRoute] failed to unmarshal mirror route: %w", err) @@ -1336,11 +1334,11 @@ func removeRoute(istioVirtualService *unstructured.Unstructured, routeName strin return fmt.Errorf(SpecHttpNotFound) } - var newVsRoutes []interface{} + var newVsRoutes []any for _, route := range vsRoutes { - routeMap, ok := route.(map[string]interface{}) + routeMap, ok := route.(map[string]any) if !ok { - return fmt.Errorf("Could not cast type to map[string]interface{} to find route name in Istio Virtual Service") + return fmt.Errorf("Could not cast type to map[string]any to find route name in Istio Virtual 
Service") } routeNameIstioSvc, ok := routeMap["name"].(string) if !ok { @@ -1392,8 +1390,8 @@ func (r *Reconciler) orderRoutes(istioVirtualService *unstructured.Unstructured) // splitManagedRoutesAndNonManagedRoutes This splits the routes from an istio virtual service into two slices // one slice contains all the routes that are also in the rollouts managedRoutes object and one that contains routes // that where only in the virtual service (aka routes that where manually added by user) -func splitManagedRoutesAndNonManagedRoutes(managedRoutes []v1alpha1.MangedRoutes, httpRouteI []interface{}) (httpRoutesWithinManagedRoutes []map[string]interface{}, httpRoutesNotWithinManagedRoutes []map[string]interface{}, err error) { - var httpRoutes []map[string]interface{} +func splitManagedRoutesAndNonManagedRoutes(managedRoutes []v1alpha1.MangedRoutes, httpRouteI []any) (httpRoutesWithinManagedRoutes []map[string]any, httpRoutesNotWithinManagedRoutes []map[string]any, err error) { + var httpRoutes []map[string]any jsonHttpRoutes, err := json.Marshal(httpRouteI) if err != nil { @@ -1424,11 +1422,11 @@ func splitManagedRoutesAndNonManagedRoutes(managedRoutes []v1alpha1.MangedRoutes return httpRoutesWithinManagedRoutes, httpRoutesNotWithinManagedRoutes, nil } -// getOrderedVirtualServiceRoutes This returns an []interface{} of istio virtual routes where the routes are ordered based +// getOrderedVirtualServiceRoutes This returns an []any of istio virtual routes where the routes are ordered based // on the rollouts managedRoutes field. We take the routes from the rollouts managedRoutes field order them and place them on top // of routes that are manually defined within the virtual service (aka. 
routes that users have defined manually) -func getOrderedVirtualServiceRoutes(httpRouteI []interface{}, managedRoutes []v1alpha1.MangedRoutes, httpRoutesWithinManagedRoutes []map[string]interface{}, httpRoutesNotWithinManagedRoutes []map[string]interface{}) ([]interface{}, error) { - var orderedManagedRoutes []map[string]interface{} +func getOrderedVirtualServiceRoutes(httpRouteI []any, managedRoutes []v1alpha1.MangedRoutes, httpRoutesWithinManagedRoutes []map[string]any, httpRoutesNotWithinManagedRoutes []map[string]any) ([]any, error) { + var orderedManagedRoutes []map[string]any for _, route := range managedRoutes { for _, managedRoute := range httpRoutesWithinManagedRoutes { if route.Name == managedRoute["name"] { @@ -1439,10 +1437,10 @@ func getOrderedVirtualServiceRoutes(httpRouteI []interface{}, managedRoutes []v1 orderedVirtualServiceHTTPRoutes := append(orderedManagedRoutes, httpRoutesNotWithinManagedRoutes...) - var orderedInterfaceVSVCHTTPRoutes []interface{} + var orderedInterfaceVSVCHTTPRoutes []any for _, routeMap := range orderedVirtualServiceHTTPRoutes { for _, route := range httpRouteI { - r := route.(map[string]interface{}) + r := route.(map[string]any) // Not checking the cast success here is ok because it covers the case when the route has no name name, rNameOK := r["name"].(string) @@ -1530,7 +1528,7 @@ func (r *Reconciler) RemoveManagedRoutes() error { if err != nil { return fmt.Errorf("[RemoveManagedRoutes] failed to marshal non-managed routes: %w", err) } - var nonManagedRoutesI []interface{} + var nonManagedRoutesI []any if err := json.Unmarshal(jsonNonManagedRoutes, &nonManagedRoutesI); err != nil { return fmt.Errorf("[RemoveManagedRoutes] failed to split managaed and non-managed routes: %w", err) } diff --git a/rollout/trafficrouting/istio/istio_test.go b/rollout/trafficrouting/istio/istio_test.go index ea6474ef5f..c6c0f8d9a1 100644 --- a/rollout/trafficrouting/istio/istio_test.go +++ b/rollout/trafficrouting/istio/istio_test.go @@ -64,6 
+64,31 @@ func rollout(stableSvc, canarySvc string, istioVirtualService *v1alpha1.IstioVir } } +func rolloutPingPong(istioVirtualService *v1alpha1.IstioVirtualService) *v1alpha1.Rollout { + return &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout", + Namespace: "default", + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + PingPong: &v1alpha1.PingPongSpec{ + PingService: "ping", + PongService: "pong", + }, + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Istio: &v1alpha1.IstioTrafficRouting{ + VirtualService: istioVirtualService, + }, + }, + }, + }, + }, + Status: v1alpha1.RolloutStatus{Canary: v1alpha1.CanaryStatus{StablePingPong: "ping"}}, + } +} + func rolloutWithHttpRoutes(stableSvc, canarySvc, vsvc string, httpRoutes []string) *v1alpha1.Rollout { istioVirtualService := &v1alpha1.IstioVirtualService{ Name: vsvc, @@ -98,6 +123,16 @@ func rolloutWithHttpAndTlsAndTcpRoutes(stableSvc, canarySvc, vsvc string, httpRo return rollout(stableSvc, canarySvc, istioVirtualService) } +func rolloutWithHttpAndTlsAndTcpRoutesPingPong(vsvc string, httpRoutes []string, tlsRoutes []v1alpha1.TLSRoute, tcpRoutes []v1alpha1.TCPRoute) *v1alpha1.Rollout { + istioVirtualService := &v1alpha1.IstioVirtualService{ + Name: vsvc, + Routes: httpRoutes, + TLSRoutes: tlsRoutes, + TCPRoutes: tcpRoutes, + } + return rolloutPingPong(istioVirtualService) +} + func checkDestination(t *testing.T, destinations []VirtualServiceRouteDestination, svc string, expectWeight int) { for _, destination := range destinations { if destination.Destination.Host == svc { @@ -263,6 +298,72 @@ spec: host: canary weight: 0` +const regularMixedVsvcPingPong = `apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: vsvc + namespace: default +spec: + gateways: + - istio-rollout-gateway + hosts: + - istio-rollout.dev.argoproj.io + http: + - name: primary + route: + - destination: + host: 'ping' + weight: 100 + - 
destination: + host: pong + weight: 0 + - name: secondary + route: + - destination: + host: 'ping' + weight: 100 + - destination: + host: pong + weight: 0 + tls: + - match: + - port: 3000 + route: + - destination: + host: 'ping' + weight: 100 + - destination: + host: pong + weight: 0 + - match: + - port: 3001 + route: + - destination: + host: 'ping' + weight: 100 + - destination: + host: pong + weight: 0 + tcp: + - match: + - port: 3000 + route: + - destination: + host: 'ping' + weight: 100 + - destination: + host: pong + weight: 0 + - match: + - port: 3001 + route: + - destination: + host: 'ping' + weight: 100 + - destination: + host: pong + weight: 0` + const regularMixedVsvcTwoHttpRoutes = `apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: @@ -597,6 +698,14 @@ func extractTcpRoutes(t *testing.T, modifiedObj *unstructured.Unstructured) []Vi } func assertTcpRouteWeightChanges(t *testing.T, tcpRoute VirtualServiceTCPRoute, portNum, canaryWeight, stableWeight int) { + assertTcpRouteWeightChangesBase(t, tcpRoute, portNum, canaryWeight, stableWeight, "stable", "canary") +} + +func assertTcpRouteWeightChangesPingPong(t *testing.T, tcpRoute VirtualServiceTCPRoute, portNum, canaryWeight, stableWeight int) { + assertTcpRouteWeightChangesBase(t, tcpRoute, portNum, canaryWeight, stableWeight, "ping", "pong") +} + +func assertTcpRouteWeightChangesBase(t *testing.T, tcpRoute VirtualServiceTCPRoute, portNum, canaryWeight, stableWeight int, stableSvc, canarySvc string) { portsMap := make(map[int64]bool) for _, routeMatch := range tcpRoute.Match { if routeMatch.Port != 0 { @@ -610,8 +719,8 @@ func assertTcpRouteWeightChanges(t *testing.T, tcpRoute VirtualServiceTCPRoute, if portNum != 0 { assert.Equal(t, portNum, port) } - checkDestination(t, tcpRoute.Route, "stable", stableWeight) - checkDestination(t, tcpRoute.Route, "canary", canaryWeight) + checkDestination(t, tcpRoute.Route, stableSvc, stableWeight) + checkDestination(t, tcpRoute.Route, canarySvc, 
canaryWeight) } func extractHttpRoutes(t *testing.T, modifiedObj *unstructured.Unstructured) []VirtualServiceHTTPRoute { @@ -643,6 +752,14 @@ func extractTlsRoutes(t *testing.T, modifiedObj *unstructured.Unstructured) []Vi } func assertTlsRouteWeightChanges(t *testing.T, tlsRoute VirtualServiceTLSRoute, snis []string, portNum, canaryWeight, stableWeight int) { + assertTlsRouteWeightChangesBase(t, tlsRoute, snis, portNum, canaryWeight, stableWeight, "stable", "canary") +} + +func assertTlsRouteWeightChangesPingPong(t *testing.T, tlsRoute VirtualServiceTLSRoute, snis []string, portNum, canaryWeight, stableWeight int) { + assertTlsRouteWeightChangesBase(t, tlsRoute, snis, portNum, canaryWeight, stableWeight, "ping", "pong") +} + +func assertTlsRouteWeightChangesBase(t *testing.T, tlsRoute VirtualServiceTLSRoute, snis []string, portNum, canaryWeight, stableWeight int, stableSvc, canarySvc string) { portsMap := make(map[int64]bool) sniHostsMap := make(map[string]bool) for _, routeMatch := range tlsRoute.Match { @@ -667,8 +784,8 @@ func assertTlsRouteWeightChanges(t *testing.T, tlsRoute VirtualServiceTLSRoute, if len(snis) != 0 { assert.Equal(t, evalUtils.Equal(snis, sniHosts), true) } - checkDestination(t, tlsRoute.Route, "stable", stableWeight) - checkDestination(t, tlsRoute.Route, "canary", canaryWeight) + checkDestination(t, tlsRoute.Route, stableSvc, stableWeight) + checkDestination(t, tlsRoute.Route, canarySvc, canaryWeight) } func TestHttpReconcileWeightsBaseCase(t *testing.T) { @@ -866,18 +983,18 @@ func TestHttpReconcileHeaderRouteWithExtra(t *testing.T) { assert.NoError(t, err) assert.True(t, found) - r0 := routes[0].(map[string]interface{}) - route, found := r0["route"].([]interface{}) + r0 := routes[0].(map[string]any) + route, found := r0["route"].([]any) assert.True(t, found) - port1 := route[0].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] + port1 := 
route[0].(map[string]any)["destination"].(map[string]any)["port"].(map[string]any)["number"] assert.True(t, port1 == int64(8443)) - r1 := routes[1].(map[string]interface{}) + r1 := routes[1].(map[string]any) _, found = r1["retries"] assert.True(t, found) - r2 := routes[2].(map[string]interface{}) + r2 := routes[2].(map[string]any) _, found = r2["retries"] assert.True(t, found) _, found = r2["corsPolicy"] @@ -891,14 +1008,14 @@ func TestHttpReconcileHeaderRouteWithExtra(t *testing.T) { assert.NoError(t, err) assert.True(t, found) - r0 = routes[0].(map[string]interface{}) - route, found = r0["route"].([]interface{}) + r0 = routes[0].(map[string]any) + route, found = r0["route"].([]any) assert.True(t, found) - port1 = route[0].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] + port1 = route[0].(map[string]any)["destination"].(map[string]any)["port"].(map[string]any)["number"] assert.True(t, port1 == float64(8443)) - r2 = routes[1].(map[string]interface{}) + r2 = routes[1].(map[string]any) _, found = r2["retries"] assert.True(t, found) _, found = r2["corsPolicy"] @@ -1104,6 +1221,57 @@ func TestReconcileWeightsBaseCase(t *testing.T) { assertTcpRouteWeightChanges(t, tcpRoutes[1], 3001, 0, 100) } +func TestReconcileWeightsPingPongBaseCase(t *testing.T) { + r := &Reconciler{ + rollout: rolloutWithHttpAndTlsAndTcpRoutesPingPong("vsvc", []string{"primary"}, + []v1alpha1.TLSRoute{ + { + Port: 3000, + }, + }, + []v1alpha1.TCPRoute{ + { + Port: 3000, + }, + }, + ), + } + obj := unstructuredutil.StrToUnstructuredUnsafe(regularMixedVsvcPingPong) + vsvcRoutes := r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Routes + vsvcTLSRoutes := r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.TLSRoutes + vsvcTCPRoutes := r.rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.TCPRoutes + modifiedObj, _, err := r.reconcileVirtualService(obj, vsvcRoutes, vsvcTLSRoutes, vsvcTCPRoutes, 
20) + assert.Nil(t, err) + assert.NotNil(t, modifiedObj) + + // HTTP Routes + httpRoutes := extractHttpRoutes(t, modifiedObj) + + // Assertions + assert.Equal(t, httpRoutes[0].Name, "primary") + checkDestination(t, httpRoutes[0].Route, "ping", 80) + checkDestination(t, httpRoutes[0].Route, "pong", 20) + + //assertHttpRouteWeightChanges(t, httpRoutes[1], "secondary", 0, 100) + assert.Equal(t, httpRoutes[1].Name, "secondary") + checkDestination(t, httpRoutes[1].Route, "ping", 100) + checkDestination(t, httpRoutes[1].Route, "pong", 0) + + // TLS Routes + tlsRoutes := extractTlsRoutes(t, modifiedObj) + // + // Assestions + assertTlsRouteWeightChangesPingPong(t, tlsRoutes[0], nil, 3000, 20, 80) + assertTlsRouteWeightChangesPingPong(t, tlsRoutes[1], nil, 3001, 0, 100) + // + // TCP Routes + tcpRoutes := extractTcpRoutes(t, modifiedObj) + + // Assestions + assertTcpRouteWeightChangesPingPong(t, tcpRoutes[0], 3000, 20, 80) + assertTcpRouteWeightChangesPingPong(t, tcpRoutes[1], 3001, 0, 100) +} + func TestReconcileUpdateVirtualService(t *testing.T) { ro := rolloutWithHttpRoutes("stable", "canary", "vsvc", []string{"primary"}) AssertReconcileUpdateVirtualService(t, regularVsvc, ro) @@ -1498,34 +1666,34 @@ func TestInvalidPatches(t *testing.T) { weight: 10, }} { - invalidHTTPRoute := make([]interface{}, 1) - invalidTlsRoute := make([]interface{}, 1) - invalidTcpRoute := make([]interface{}, 1) + invalidHTTPRoute := make([]any, 1) + invalidTlsRoute := make([]any, 1) + invalidTcpRoute := make([]any, 1) invalidHTTPRoute[0] = "not a map" err := patches.patchVirtualService(invalidHTTPRoute, invalidTlsRoute, invalidTcpRoute) assert.Error(t, err, invalidCasting, "http[]", "map[string]interface") } { - invalidHTTPRoute := []interface{}{ - map[string]interface{}{ + invalidHTTPRoute := []any{ + map[string]any{ "route": "not a []interface", }, } - invalidTlsRoute := make([]interface{}, 1) - invalidTcpRoute := make([]interface{}, 1) + invalidTlsRoute := make([]any, 1) + invalidTcpRoute := 
make([]any, 1) err := patches.patchVirtualService(invalidHTTPRoute, invalidTlsRoute, invalidTcpRoute) assert.Error(t, err, invalidCasting, "http[].route", "[]interface") } { - invalidHTTPRoute := []interface{}{ - map[string]interface{}{ - "route": []interface{}{ + invalidHTTPRoute := []any{ + map[string]any{ + "route": []any{ "destination", }, }, } - invalidTlsRoute := make([]interface{}, 1) - invalidTCPRoute := make([]interface{}, 1) + invalidTlsRoute := make([]any, 1) + invalidTCPRoute := make([]any, 1) err := patches.patchVirtualService(invalidHTTPRoute, invalidTlsRoute, invalidTCPRoute) assert.Error(t, err, invalidCasting, "http[].route[].destination", "map[string]interface") } @@ -2543,20 +2711,20 @@ func TestHttpReconcileMirrorRouteWithExtraFields(t *testing.T) { assert.NoError(t, err) assert.True(t, found) - r0 := routes[0].(map[string]interface{}) - mirrorRoute, found := r0["route"].([]interface{}) + r0 := routes[0].(map[string]any) + mirrorRoute, found := r0["route"].([]any) assert.True(t, found) - port1 := mirrorRoute[0].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] - port2 := mirrorRoute[1].(map[string]interface{})["destination"].(map[string]interface{})["port"].(map[string]interface{})["number"] + port1 := mirrorRoute[0].(map[string]any)["destination"].(map[string]any)["port"].(map[string]any)["number"] + port2 := mirrorRoute[1].(map[string]any)["destination"].(map[string]any)["port"].(map[string]any)["number"] assert.True(t, port1 == float64(8443)) assert.True(t, port2 == float64(8443)) - r1 := routes[1].(map[string]interface{}) + r1 := routes[1].(map[string]any) _, found = r1["retries"] assert.True(t, found) - r2 := routes[2].(map[string]interface{}) + r2 := routes[2].(map[string]any) _, found = r2["retries"] assert.True(t, found) _, found = r2["corsPolicy"] diff --git a/rollout/trafficrouting/istio/istio_types.go b/rollout/trafficrouting/istio/istio_types.go index 9d8f37d3f3..9544efc49e 
100644 --- a/rollout/trafficrouting/istio/istio_types.go +++ b/rollout/trafficrouting/istio/istio_types.go @@ -108,5 +108,5 @@ type Subset struct { Name string `json:"name,omitempty"` Labels map[string]string `json:"labels,omitempty"` // TrafficPolicy *json.RawMessage `json:"trafficPolicy,omitempty"` - Extra map[string]interface{} `json:",omitempty"` + Extra map[string]any `json:",omitempty"` } diff --git a/rollout/trafficrouting/nginx/nginx.go b/rollout/trafficrouting/nginx/nginx.go index 3d56c55f6a..649931c258 100644 --- a/rollout/trafficrouting/nginx/nginx.go +++ b/rollout/trafficrouting/nginx/nginx.go @@ -133,6 +133,10 @@ func (r *Reconciler) buildCanaryIngress(stableIngress *networkingv1.Ingress, nam desiredCanaryIngress.Annotations[fmt.Sprintf("%s/canary", annotationPrefix)] = "true" desiredCanaryIngress.Annotations[fmt.Sprintf("%s/canary-weight", annotationPrefix)] = fmt.Sprintf("%d", desiredWeight) + if r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight != nil { + weightTotal := *r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight + desiredCanaryIngress.Annotations[fmt.Sprintf("%s/canary-weight-total", annotationPrefix)] = fmt.Sprintf("%d", weightTotal) + } return ingressutil.NewIngress(desiredCanaryIngress), nil } @@ -209,6 +213,10 @@ func (r *Reconciler) buildLegacyCanaryIngress(stableIngress *extensionsv1beta1.I desiredCanaryIngress.Annotations[fmt.Sprintf("%s/canary", annotationPrefix)] = "true" desiredCanaryIngress.Annotations[fmt.Sprintf("%s/canary-weight", annotationPrefix)] = fmt.Sprintf("%d", desiredWeight) + if r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight != nil { + weightTotal := *r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight + desiredCanaryIngress.Annotations[fmt.Sprintf("%s/canary-weight-total", annotationPrefix)] = fmt.Sprintf("%d", weightTotal) + } return ingressutil.NewLegacyIngress(desiredCanaryIngress), nil } diff --git a/rollout/trafficrouting/nginx/nginx_test.go 
b/rollout/trafficrouting/nginx/nginx_test.go index 2be80fa23a..c3b4ce357a 100644 --- a/rollout/trafficrouting/nginx/nginx_test.go +++ b/rollout/trafficrouting/nginx/nginx_test.go @@ -574,6 +574,52 @@ func TestCanaryIngressAdditionalAnnotations(t *testing.T) { } } +func TestCanaryIngressMaxWeightInTrafficRouting(t *testing.T) { + maxWeights := []*int32{nil, pointer.Int32(1000)} + for _, maxWeight := range maxWeights { + tests := generateMultiIngressTestData() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + r := Reconciler{ + cfg: ReconcilerConfig{ + Rollout: fakeRollout(stableService, canaryService, test.singleIngress, test.multiIngress), + }, + } + r.cfg.Rollout.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight = maxWeight + for _, ing := range test.ingresses { + stable := extensionsIngress(ing, 80, stableService) + canary := extensionsIngress("canary-ingress", 80, canaryService) + canary.SetAnnotations(map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-weight": "10", + }) + stableIngress := ingressutil.NewLegacyIngress(stable) + canaryIngress := ingressutil.NewLegacyIngress(canary) + + desiredCanaryIngress, err := r.canaryIngress(stableIngress, ingressutil.GetCanaryIngressName(r.cfg.Rollout.GetName(), ing), 15) + assert.Nil(t, err, "No error returned when calling canaryIngress") + + checkBackendService(t, desiredCanaryIngress, canaryService) + + patch, modified, err := ingressutil.BuildIngressPatch(canaryIngress.Mode(), canaryIngress, desiredCanaryIngress, + ingressutil.WithAnnotations(), ingressutil.WithLabels(), ingressutil.WithSpec()) + assert.Nil(t, err, "compareCanaryIngresses returns no error") + assert.True(t, modified, "compareCanaryIngresses returns modified=true") + if maxWeight == nil { + assert.Equal(t, + "{\"metadata\":{\"annotations\":{\"nginx.ingress.kubernetes.io/canary-weight\":\"15\"}}}", string(patch), "compareCanaryIngresses returns expected patch") + } else { + 
assert.Equal(t, + fmt.Sprintf("{\"metadata\":{\"annotations\":{\"nginx.ingress.kubernetes.io/canary-weight\":\"15\",\"nginx.ingress.kubernetes.io/canary-weight-total\":\"%d\"}}}", *maxWeight), + string(patch), "compareCanaryIngresses returns expected patch") + } + } + }) + } + } + +} + func TestReconciler_canaryIngress(t *testing.T) { tests := generateMultiIngressTestData() for _, test := range tests { diff --git a/rollout/trafficrouting/plugin/client/client.go b/rollout/trafficrouting/plugin/client/client.go index 6350c2c578..a9a8b0b713 100644 --- a/rollout/trafficrouting/plugin/client/client.go +++ b/rollout/trafficrouting/plugin/client/client.go @@ -52,7 +52,7 @@ func (t *trafficPlugin) startPlugin(pluginName string) (rpc.TrafficRouterPlugin, if t.pluginClient[pluginName] == nil || t.pluginClient[pluginName].Exited() { - pluginPath, err := plugin.GetPluginLocation(pluginName) + pluginPath, args, err := plugin.GetPluginInfo(pluginName) if err != nil { return nil, fmt.Errorf("unable to find plugin (%s): %w", pluginName, err) } @@ -60,7 +60,7 @@ func (t *trafficPlugin) startPlugin(pluginName string) (rpc.TrafficRouterPlugin, t.pluginClient[pluginName] = goPlugin.NewClient(&goPlugin.ClientConfig{ HandshakeConfig: handshakeConfig, Plugins: pluginMap, - Cmd: exec.Command(pluginPath), + Cmd: exec.Command(pluginPath, args...), Managed: true, }) @@ -83,7 +83,7 @@ func (t *trafficPlugin) startPlugin(pluginName string) (rpc.TrafficRouterPlugin, resp := t.plugin[pluginName].InitPlugin() if resp.HasError() { - return nil, fmt.Errorf("unable to initialize plugin via rpc (%s): %w", pluginName, err) + return nil, fmt.Errorf("unable to initialize plugin via rpc (%s): %w", pluginName, resp) } } diff --git a/rollout/trafficrouting/plugin/rpc/rpc.go b/rollout/trafficrouting/plugin/rpc/rpc.go index 78cb4103d9..297ff9d21b 100644 --- a/rollout/trafficrouting/plugin/rpc/rpc.go +++ b/rollout/trafficrouting/plugin/rpc/rpc.go @@ -65,7 +65,7 @@ type TrafficRouterPluginRPC struct{ client 
*rpc.Client } // this gets called once during startup of the plugin and can be used to set up informers or k8s clients etc. func (g *TrafficRouterPluginRPC) InitPlugin() types.RpcError { var resp types.RpcError - err := g.client.Call("Plugin.InitPlugin", new(interface{}), &resp) + err := g.client.Call("Plugin.InitPlugin", new(any), &resp) if err != nil { return types.RpcError{ErrorString: fmt.Sprintf("InitPlugin rpc call error: %s", err)} } @@ -75,7 +75,7 @@ func (g *TrafficRouterPluginRPC) InitPlugin() types.RpcError { // UpdateHash informs a traffic routing reconciler about new canary, stable, and additionalDestination(s) pod hashes func (g *TrafficRouterPluginRPC) UpdateHash(rollout *v1alpha1.Rollout, canaryHash string, stableHash string, additionalDestinations []v1alpha1.WeightDestination) types.RpcError { var resp types.RpcError - var args interface{} = UpdateHashArgs{ + var args any = UpdateHashArgs{ Rollout: *rollout, CanaryHash: canaryHash, StableHash: stableHash, @@ -91,7 +91,7 @@ func (g *TrafficRouterPluginRPC) UpdateHash(rollout *v1alpha1.Rollout, canaryHas // SetWeight sets the canary weight to the desired weight func (g *TrafficRouterPluginRPC) SetWeight(rollout *v1alpha1.Rollout, desiredWeight int32, additionalDestinations []v1alpha1.WeightDestination) types.RpcError { var resp types.RpcError - var args interface{} = SetWeightAndVerifyWeightArgs{ + var args any = SetWeightAndVerifyWeightArgs{ Rollout: *rollout, DesiredWeight: desiredWeight, AdditionalDestinations: additionalDestinations, @@ -106,7 +106,7 @@ func (g *TrafficRouterPluginRPC) SetWeight(rollout *v1alpha1.Rollout, desiredWei // SetHeaderRoute sets the header routing step func (g *TrafficRouterPluginRPC) SetHeaderRoute(rollout *v1alpha1.Rollout, setHeaderRoute *v1alpha1.SetHeaderRoute) types.RpcError { var resp types.RpcError - var args interface{} = SetHeaderArgs{ + var args any = SetHeaderArgs{ Rollout: *rollout, SetHeaderRoute: *setHeaderRoute, } @@ -120,7 +120,7 @@ func (g 
*TrafficRouterPluginRPC) SetHeaderRoute(rollout *v1alpha1.Rollout, setHe // SetMirrorRoute sets up the traffic router to mirror traffic to a service func (g *TrafficRouterPluginRPC) SetMirrorRoute(rollout *v1alpha1.Rollout, setMirrorRoute *v1alpha1.SetMirrorRoute) types.RpcError { var resp types.RpcError - var args interface{} = SetMirrorArgs{ + var args any = SetMirrorArgs{ Rollout: *rollout, SetMirrorRoute: *setMirrorRoute, } @@ -134,7 +134,7 @@ func (g *TrafficRouterPluginRPC) SetMirrorRoute(rollout *v1alpha1.Rollout, setMi // Type returns the type of the traffic routing reconciler func (g *TrafficRouterPluginRPC) Type() string { var resp string - err := g.client.Call("Plugin.Type", new(interface{}), &resp) + err := g.client.Call("Plugin.Type", new(any), &resp) if err != nil { return fmt.Sprintf("Type rpc call error: %s", err) } @@ -146,7 +146,7 @@ func (g *TrafficRouterPluginRPC) Type() string { // Returns nil if weight verification is not supported or not applicable func (g *TrafficRouterPluginRPC) VerifyWeight(rollout *v1alpha1.Rollout, desiredWeight int32, additionalDestinations []v1alpha1.WeightDestination) (types.RpcVerified, types.RpcError) { var resp VerifyWeightResponse - var args interface{} = SetWeightAndVerifyWeightArgs{ + var args any = SetWeightAndVerifyWeightArgs{ Rollout: *rollout, DesiredWeight: desiredWeight, AdditionalDestinations: additionalDestinations, @@ -161,7 +161,7 @@ func (g *TrafficRouterPluginRPC) VerifyWeight(rollout *v1alpha1.Rollout, desired // RemoveAllRoutes Removes all routes that are managed by rollouts by looking at spec.strategy.canary.trafficRouting.managedRoutes func (g *TrafficRouterPluginRPC) RemoveManagedRoutes(rollout *v1alpha1.Rollout) types.RpcError { var resp types.RpcError - var args interface{} = RemoveManagedRoutesArgs{ + var args any = RemoveManagedRoutesArgs{ Rollout: *rollout, } err := g.client.Call("Plugin.RemoveManagedRoutes", &args, &resp) @@ -180,13 +180,13 @@ type TrafficRouterRPCServer struct { // 
InitPlugin this is the server aka the controller side function that receives calls from the client side rpc (controller) // this gets called once during startup of the plugin and can be used to set up informers or k8s clients etc. -func (s *TrafficRouterRPCServer) InitPlugin(args interface{}, resp *types.RpcError) error { +func (s *TrafficRouterRPCServer) InitPlugin(args any, resp *types.RpcError) error { *resp = s.Impl.InitPlugin() return nil } // UpdateHash informs a traffic routing reconciler about new canary, stable, and additionalDestination(s) pod hashes -func (s *TrafficRouterRPCServer) UpdateHash(args interface{}, resp *types.RpcError) error { +func (s *TrafficRouterRPCServer) UpdateHash(args any, resp *types.RpcError) error { runArgs, ok := args.(*UpdateHashArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -196,7 +196,7 @@ func (s *TrafficRouterRPCServer) UpdateHash(args interface{}, resp *types.RpcErr } // SetWeight sets the canary weight to the desired weight -func (s *TrafficRouterRPCServer) SetWeight(args interface{}, resp *types.RpcError) error { +func (s *TrafficRouterRPCServer) SetWeight(args any, resp *types.RpcError) error { setWeigthArgs, ok := args.(*SetWeightAndVerifyWeightArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -206,7 +206,7 @@ func (s *TrafficRouterRPCServer) SetWeight(args interface{}, resp *types.RpcErro } // SetHeaderRoute sets the header routing step -func (s *TrafficRouterRPCServer) SetHeaderRoute(args interface{}, resp *types.RpcError) error { +func (s *TrafficRouterRPCServer) SetHeaderRoute(args any, resp *types.RpcError) error { setHeaderArgs, ok := args.(*SetHeaderArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -216,7 +216,7 @@ func (s *TrafficRouterRPCServer) SetHeaderRoute(args interface{}, resp *types.Rp } // SetMirrorRoute sets up the traffic router to mirror traffic to a service -func (s *TrafficRouterRPCServer) SetMirrorRoute(args interface{}, resp *types.RpcError) error { +func (s 
*TrafficRouterRPCServer) SetMirrorRoute(args any, resp *types.RpcError) error { setMirrorArgs, ok := args.(*SetMirrorArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -226,14 +226,14 @@ func (s *TrafficRouterRPCServer) SetMirrorRoute(args interface{}, resp *types.Rp } // Type returns the type of the traffic routing reconciler -func (s *TrafficRouterRPCServer) Type(args interface{}, resp *string) error { +func (s *TrafficRouterRPCServer) Type(args any, resp *string) error { *resp = s.Impl.Type() return nil } // VerifyWeight returns true if the canary is at the desired weight and additionalDestinations are at the weights specified // Returns nil if weight verification is not supported or not applicable -func (s *TrafficRouterRPCServer) VerifyWeight(args interface{}, resp *VerifyWeightResponse) error { +func (s *TrafficRouterRPCServer) VerifyWeight(args any, resp *VerifyWeightResponse) error { verifyWeightArgs, ok := args.(*SetWeightAndVerifyWeightArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -247,7 +247,7 @@ func (s *TrafficRouterRPCServer) VerifyWeight(args interface{}, resp *VerifyWeig } // RemoveAllRoutes Removes all routes that are managed by rollouts by looking at spec.strategy.canary.trafficRouting.managedRoutes -func (s *TrafficRouterRPCServer) RemoveManagedRoutes(args interface{}, resp *types.RpcError) error { +func (s *TrafficRouterRPCServer) RemoveManagedRoutes(args any, resp *types.RpcError) error { removeManagedRoutesArgs, ok := args.(*RemoveManagedRoutesArgs) if !ok { return fmt.Errorf("invalid args %s", args) @@ -271,10 +271,10 @@ type RpcTrafficRouterPlugin struct { Impl TrafficRouterPlugin } -func (p *RpcTrafficRouterPlugin) Server(*plugin.MuxBroker) (interface{}, error) { +func (p *RpcTrafficRouterPlugin) Server(*plugin.MuxBroker) (any, error) { return &TrafficRouterRPCServer{Impl: p.Impl}, nil } -func (RpcTrafficRouterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { +func (RpcTrafficRouterPlugin) 
Client(b *plugin.MuxBroker, c *rpc.Client) (any, error) { return &TrafficRouterPluginRPC{client: c}, nil } diff --git a/rollout/trafficrouting/service_helper.go b/rollout/trafficrouting/service_helper.go index 15d03cad53..8bae069fe7 100644 --- a/rollout/trafficrouting/service_helper.go +++ b/rollout/trafficrouting/service_helper.go @@ -7,17 +7,26 @@ import ( // GetStableAndCanaryServices return a service names for current stable and canary services. // If ping-pong feature enabled then the current ping or pong service will be returned. Which is a stable is defined // based on a rollout status field Status.Canary.StablePingPong -func GetStableAndCanaryServices(ro *v1alpha1.Rollout) (string, string) { - if IsPingPongEnabled(ro) { + +// isPingpongPreferred is needed when Rollout uses both pingpong service and stable/canary service +// for ALB trafficRouting, isPingpongPreferred is true. It uses pingpong service as priority +// for other trafficRouting, isPingpongPrefrered is false. It uses stable/canary service +// This is to ensure it is compatible with previous release. + +func GetStableAndCanaryServices(ro *v1alpha1.Rollout, isPingpongPreferred bool) (string, string) { + pingPongNotPreferredOtherServiceNotDefined := !isPingpongPreferred && ro.Spec.Strategy.Canary.StableService == "" && ro.Spec.Strategy.Canary.CanaryService == "" + if IsPingPongEnabled(ro) && + (isPingpongPreferred || pingPongNotPreferredOtherServiceNotDefined) { canary := ro.Spec.Strategy.Canary if IsStablePing(ro) { return canary.PingPong.PingService, canary.PingPong.PongService } else { return canary.PingPong.PongService, canary.PingPong.PingService } - } else { - return ro.Spec.Strategy.Canary.StableService, ro.Spec.Strategy.Canary.CanaryService } + + return ro.Spec.Strategy.Canary.StableService, ro.Spec.Strategy.Canary.CanaryService + } // IsStablePing return true if the 'ping' service is pointing to the stable replica set. 
diff --git a/rollout/trafficrouting/service_helper_test.go b/rollout/trafficrouting/service_helper_test.go new file mode 100644 index 0000000000..cc0634eb6f --- /dev/null +++ b/rollout/trafficrouting/service_helper_test.go @@ -0,0 +1,82 @@ +package trafficrouting + +import ( + "testing" + + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const PING_SVC = "ping-service" +const PONG_SVC = "pong-service" + +func fakeRollout(stableSvc, canarySvc string, pingPong *v1alpha1.PingPongSpec, stableIng string, port int32) *v1alpha1.Rollout { + return &v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + StableService: stableSvc, + CanaryService: canarySvc, + PingPong: pingPong, + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + ALB: &v1alpha1.ALBTrafficRouting{ + Ingress: stableIng, + ServicePort: port, + }, + Istio: &v1alpha1.IstioTrafficRouting{ + VirtualService: &v1alpha1.IstioVirtualService{ + Name: "istio-vsvc", + }, + DestinationRule: &v1alpha1.IstioDestinationRule{ + Name: "istio-destrule", + CanarySubsetName: "canary", + StableSubsetName: "stable", + }, + }, + }, + }, + }, + }, + } +} + +func TestGetStableAndCanaryServices(t *testing.T) { + // Rollout has no pingPong + rollout := fakeRollout("stable-service", "canary-service", nil, "stable-ingress", 443) + + stableService, canaryService := GetStableAndCanaryServices(rollout, true) + assert.Equal(t, "stable-service", stableService) + assert.Equal(t, "canary-service", canaryService) + + stableService, canaryService = GetStableAndCanaryServices(rollout, false) + assert.Equal(t, "stable-service", stableService) + assert.Equal(t, "canary-service", canaryService) + + // Rollout has pingPong and stable/canary + pp := &v1alpha1.PingPongSpec{PingService: 
PING_SVC, PongService: PONG_SVC} + rollout = fakeRollout("stable-service", "canary-service", pp, "stable-ingress", 443) + + stableService, canaryService = GetStableAndCanaryServices(rollout, true) + assert.Equal(t, PONG_SVC, stableService) + assert.Equal(t, PING_SVC, canaryService) + + stableService, canaryService = GetStableAndCanaryServices(rollout, false) + assert.Equal(t, "stable-service", stableService) + assert.Equal(t, "canary-service", canaryService) + + // Rollout has pingPong, no stable/canary + rollout = fakeRollout("", "", pp, "stable-ingress", 443) + + stableService, canaryService = GetStableAndCanaryServices(rollout, true) + assert.Equal(t, PONG_SVC, stableService) + assert.Equal(t, PING_SVC, canaryService) + + stableService, canaryService = GetStableAndCanaryServices(rollout, false) + assert.Equal(t, PONG_SVC, stableService) + assert.Equal(t, PING_SVC, canaryService) +} diff --git a/rollout/trafficrouting/traefik/mocks/traefik.go b/rollout/trafficrouting/traefik/mocks/traefik.go index aedfd659cd..b0cd361809 100644 --- a/rollout/trafficrouting/traefik/mocks/traefik.go +++ b/rollout/trafficrouting/traefik/mocks/traefik.go @@ -35,10 +35,10 @@ var ( ErrorTraefikServiceObj *unstructured.Unstructured ) -func (f *FakeRecorder) Eventf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) Eventf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...any) { } -func (f *FakeRecorder) Warnf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) Warnf(object runtime.Object, opts argoRecord.EventOptions, messageFmt string, args ...any) { } func (f *FakeRecorder) K8sRecorder() record.EventRecorder { diff --git a/rollout/trafficrouting/traefik/traefik.go b/rollout/trafficrouting/traefik/traefik.go index f5b507207f..8b5cfb7312 100644 --- a/rollout/trafficrouting/traefik/traefik.go +++ 
b/rollout/trafficrouting/traefik/traefik.go @@ -25,7 +25,7 @@ const TraefikServiceUpdateError = "TraefikServiceUpdateError" var ( apiGroupToResource = map[string]string{ - defaults.DefaultTraefikAPIGroup: traefikServices, + defaults.GetTraefikAPIGroup(): traefikServices, } ) @@ -68,8 +68,8 @@ func NewDynamicClient(di dynamic.Interface, namespace string) dynamic.ResourceIn } func GetMappingGVR() schema.GroupVersionResource { - group := defaults.DefaultTraefikAPIGroup - parts := strings.Split(defaults.DefaultTraefikVersion, "/") + group := defaults.GetTraefikAPIGroup() + parts := strings.Split(defaults.GetTraefikVersion(), "/") version := parts[len(parts)-1] resourceName := apiGroupToResource[group] return schema.GroupVersionResource{ @@ -134,10 +134,10 @@ func (r *Reconciler) SetWeight(desiredWeight int32, additionalDestinations ...v1 return err } -func getService(serviceName string, services []interface{}) (map[string]interface{}, error) { - var selectedService map[string]interface{} +func getService(serviceName string, services []any) (map[string]any, error) { + var selectedService map[string]any for _, service := range services { - typedService, ok := service.(map[string]interface{}) + typedService, ok := service.(map[string]any) if !ok { return nil, errors.New("Failed type assertion setting weight for traefik service") } diff --git a/rollout/trafficrouting/traefik/traefik_test.go b/rollout/trafficrouting/traefik/traefik_test.go index d7dbbbd297..0b5b0c9701 100644 --- a/rollout/trafficrouting/traefik/traefik_test.go +++ b/rollout/trafficrouting/traefik/traefik_test.go @@ -38,10 +38,6 @@ metadata: name: mocks-service ` -var ( - client *mocks.FakeClient = &mocks.FakeClient{} -) - const ( stableServiceName string = "stable-rollout" fakeStableServiceName string = "fake-stable-rollout" @@ -67,7 +63,7 @@ func TestUpdateHash(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), - Client: 
client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -84,10 +80,9 @@ func TestSetWeight(t *testing.T) { mocks.ErrorTraefikServiceObj = toUnstructured(t, errorTraefikService) t.Run("SetWeight", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -114,7 +109,6 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithError", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), Client: &mocks.FakeClient{ @@ -131,7 +125,6 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithErrorManifest", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), Client: &mocks.FakeClient{ @@ -148,10 +141,9 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithErrorStableName", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(fakeStableServiceName, canaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -163,10 +155,9 @@ func TestSetWeight(t *testing.T) { }) t.Run("SetWeightWithErrorCanaryName", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, fakeCanaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -178,7 +169,6 @@ func TestSetWeight(t *testing.T) { }) t.Run("TraefikUpdateError", func(t *testing.T) { // Given - t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), Client: &mocks.FakeClient{ @@ -202,7 +192,7 @@ func TestSetHeaderRoute(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: 
newRollout(stableServiceName, canaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -231,7 +221,7 @@ func TestSetMirrorRoute(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -269,7 +259,7 @@ func TestVerifyWeight(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -289,7 +279,7 @@ func TestType(t *testing.T) { t.Parallel() cfg := ReconcilerConfig{ Rollout: newRollout(stableServiceName, canaryServiceName, traefikServiceName), - Client: client, + Client: &mocks.FakeClient{}, } r := NewReconciler(&cfg) @@ -305,7 +295,7 @@ func TestGetService(t *testing.T) { t.Run("ErrorGetServiceFromStruct ", func(t *testing.T) { // Given t.Parallel() - services := []interface{}{ + services := []any{ mocks.FakeService{Weight: 12}, } @@ -319,12 +309,12 @@ func TestGetService(t *testing.T) { t.Run("ErrorGetServiceFromMap", func(t *testing.T) { // Given t.Parallel() - services := map[string]interface{}{ + services := map[string]any{ "weight": 100, } // When - selectedServices, err := getService("default", []interface{}{services}) + selectedServices, err := getService("default", []any{services}) // Then assert.Nil(t, selectedServices) @@ -334,12 +324,12 @@ func TestGetService(t *testing.T) { // Given t.Parallel() const serviceName string = "default" - services := map[string]interface{}{ + services := map[string]any{ "name": serviceName, } // When - selectedServices, err := getService(serviceName, []interface{}{services}) + selectedServices, err := getService(serviceName, []any{services}) // Then assert.NotNil(t, selectedServices) @@ -348,12 +338,12 @@ func TestGetService(t *testing.T) { 
t.Run("ErrorGetServiceFromNil", func(t *testing.T) { // Given t.Parallel() - services := map[string]interface{}{ + services := map[string]any{ "name": nil, } // When - selectedServices, err := getService("default", []interface{}{services}) + selectedServices, err := getService("default", []any{services}) // Then assert.Nil(t, selectedServices) diff --git a/rollout/trafficrouting_test.go b/rollout/trafficrouting_test.go index a8f5520ea2..f358a18f64 100644 --- a/rollout/trafficrouting_test.go +++ b/rollout/trafficrouting_test.go @@ -122,7 +122,7 @@ func TestReconcileTrafficRoutingVerifyWeightFalse(t *testing.T) { f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(false), nil) c, i, k8sI := f.newController(noResyncPeriodFunc) enqueued := false - c.enqueueRolloutAfter = func(obj interface{}, duration time.Duration) { + c.enqueueRolloutAfter = func(obj any, duration time.Duration) { enqueued = true } f.expectPatchRolloutAction(ro) @@ -130,6 +130,53 @@ func TestReconcileTrafficRoutingVerifyWeightFalse(t *testing.T) { assert.True(t, enqueued) } +func TestReconcileTrafficRoutingVerifyWeightEndOfRollout(t *testing.T) { + f := newFixture(t) + defer f.Close() + + steps := []v1alpha1.CanaryStep{ + { + SetWeight: pointer.Int32Ptr(10), + }, + { + Pause: &v1alpha1.RolloutPause{}, + }, + } + r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(2), intstr.FromInt(1), intstr.FromInt(0)) + r2 := bumpVersion(r1) + r2.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{} + r2.Spec.Strategy.Canary.CanaryService = "canary" + r2.Spec.Strategy.Canary.StableService = "stable" + + rs1 := newReplicaSetWithStatus(r1, 10, 10) + rs2 := newReplicaSetWithStatus(r2, 10, 10) + + rs1PodHash := rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + rs2PodHash := rs2.Labels[v1alpha1.DefaultRolloutUniqueLabelKey] + canarySelector := map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs2PodHash} + stableSelector := 
map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: rs1PodHash} + canarySvc := newService("canary", 80, canarySelector, r2) + stableSvc := newService("stable", 80, stableSelector, r2) + + f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc) + f.replicaSetLister = append(f.replicaSetLister, rs1, rs2) + + r2 = updateCanaryRolloutStatus(r2, rs1PodHash, 10, 0, 10, false) + f.rolloutLister = append(f.rolloutLister, r2) + f.objects = append(f.objects, r2) + + f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() + f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("SetWeight", mock.Anything, mock.Anything).Return(func(desiredWeight int32, additionalDestinations ...v1alpha1.WeightDestination) error { + // make sure SetWeight was called with correct value + assert.Equal(t, int32(100), desiredWeight) + return nil + }) + f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(pointer.BoolPtr(false), nil) + f.runExpectError(getKey(r2, t), true) +} + func TestRolloutUseDesiredWeight(t *testing.T) { f := newFixture(t) defer f.Close() @@ -249,13 +296,21 @@ func TestRolloutWithExperimentStep(t *testing.T) { SpecRef: "canary", Replicas: pointer.Int32Ptr(1), Weight: pointer.Int32Ptr(5), - }}, + }, + { + Name: "experiment-template-without-weight", + SpecRef: "stable", + Replicas: pointer.Int32Ptr(1), + }}, }, }, } r1 := newCanaryRollout("foo", 10, nil, steps, pointer.Int32Ptr(1), intstr.FromInt(1), intstr.FromInt(0)) r2 := bumpVersion(r1) - r2.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{} + r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{ + SMI: &v1alpha1.SMITrafficRouting{}, + } + r2.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{SMI: &v1alpha1.SMITrafficRouting{}} r2.Spec.Strategy.Canary.CanaryService = "canary" 
r2.Spec.Strategy.Canary.StableService = "stable" @@ -273,7 +328,12 @@ func TestRolloutWithExperimentStep(t *testing.T) { Name: "experiment-template", ServiceName: "experiment-service", PodTemplateHash: rs2PodHash, - }} + }, + { + Name: "experiment-template-without-weight", + ServiceName: "experiment-service-without-weight", + PodTemplateHash: rs2PodHash, + }} r2.Status.Canary.CurrentExperiment = ex.Name f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc) @@ -293,18 +353,13 @@ func TestRolloutWithExperimentStep(t *testing.T) { // make sure SetWeight was called with correct value assert.Equal(t, int32(10), desiredWeight) assert.Equal(t, int32(5), weightDestinations[0].Weight) + assert.Len(t, weightDestinations, 1) assert.Equal(t, ex.Status.TemplateStatuses[0].ServiceName, weightDestinations[0].ServiceName) assert.Equal(t, ex.Status.TemplateStatuses[0].PodTemplateHash, weightDestinations[0].PodTemplateHash) return nil }) f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) - f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { - assert.Equal(t, int32(10), desiredWeight) - assert.Equal(t, int32(5), weightDestinations[0].Weight) - assert.Equal(t, ex.Status.TemplateStatuses[0].ServiceName, weightDestinations[0].ServiceName) - assert.Equal(t, ex.Status.TemplateStatuses[0].PodTemplateHash, weightDestinations[0].PodTemplateHash) - return nil - }) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything, mock.Anything).Return(pointer.BoolPtr(true), nil) f.run(getKey(r2, t)) }) @@ -319,11 +374,7 @@ func TestRolloutWithExperimentStep(t *testing.T) { return nil }) f.fakeTrafficRouting.On("SetHeaderRoute", mock.Anything, mock.Anything).Return(nil) - f.fakeTrafficRouting.On("VerifyWeight", mock.Anything).Return(func(desiredWeight int32, weightDestinations ...v1alpha1.WeightDestination) error { - assert.Equal(t, int32(10), desiredWeight) - 
assert.Len(t, weightDestinations, 0) - return nil - }) + f.fakeTrafficRouting.On("VerifyWeight", mock.Anything, mock.Anything).Return(pointer.BoolPtr(true), nil) f.run(getKey(r2, t)) }) } @@ -1213,10 +1264,10 @@ func TestDontWeightToZeroWhenDynamicallyRollingBackToStable(t *testing.T) { f.rolloutLister = append(f.rolloutLister, r2) f.objects = append(f.objects, r2) - f.expectUpdateReplicaSetAction(rs1) // Updates the revision annotation from 1 to 3 - f.expectUpdateReplicaSetAction(rs1) // repeat of the above (not sure why) - scaleUpIndex := f.expectUpdateReplicaSetAction(rs1) // this one scales the stable RS to 10 - f.expectPatchRolloutAction(r2) + f.expectUpdateReplicaSetAction(rs1) // Updates the revision annotation from 1 to 3 from func isScalingEvent + f.expectUpdateRolloutAction(r2) // Update the rollout revision from 1 to 3 + scaleUpIndex := f.expectUpdateReplicaSetAction(rs1) // Scale The replicaset from 1 to 10 from func scaleReplicaSet + f.expectPatchRolloutAction(r2) // Updates the rollout status from the scaling to 10 action f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler() f.fakeTrafficRouting.On("UpdateHash", mock.Anything, mock.Anything, mock.Anything).Return(func(canaryHash, stableHash string, additionalDestinations ...v1alpha1.WeightDestination) error { diff --git a/server/server.go b/server/server.go index cbb742333d..3482634491 100644 --- a/server/server.go +++ b/server/server.go @@ -290,15 +290,15 @@ func (s *ArgoRolloutsServer) WatchRolloutInfos(q *rollout.RolloutInfoListQuery, rolloutUpdateChan := make(chan *v1alpha1.Rollout) rolloutInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { rolloutUpdateChan <- obj.(*v1alpha1.Rollout) }, - UpdateFunc: func(oldObj, newObj interface{}) { + UpdateFunc: func(oldObj, newObj any) { rolloutUpdateChan <- newObj.(*v1alpha1.Rollout) }, }) podsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - DeleteFunc: func(obj 
interface{}) { + DeleteFunc: func(obj any) { podUpdated(obj.(*corev1.Pod), rsLister, rolloutsLister, rolloutUpdateChan) }, }) diff --git a/service/service.go b/service/service.go index f81c82c771..be143de83d 100644 --- a/service/service.go +++ b/service/service.go @@ -82,7 +82,7 @@ type Controller struct { resyncPeriod time.Duration metricServer *metrics.MetricsServer - enqueueRollout func(obj interface{}) + enqueueRollout func(obj any) } // NewController returns a new service controller @@ -103,7 +103,7 @@ func NewController(cfg ControllerConfig) *Controller { } util.CheckErr(cfg.RolloutsInformer.Informer().AddIndexers(cache.Indexers{ - serviceIndexName: func(obj interface{}) (strings []string, e error) { + serviceIndexName: func(obj any) (strings []string, e error) { if ro := unstructuredutil.ObjectToRollout(obj); ro != nil { return serviceutil.GetRolloutServiceKeys(ro), nil } @@ -112,17 +112,17 @@ func NewController(cfg ControllerConfig) *Controller { })) cfg.ServicesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { + AddFunc: func(obj any) { controllerutil.Enqueue(obj, cfg.ServiceWorkqueue) }, - UpdateFunc: func(oldObj, newObj interface{}) { + UpdateFunc: func(oldObj, newObj any) { controllerutil.Enqueue(newObj, cfg.ServiceWorkqueue) }, - DeleteFunc: func(obj interface{}) { + DeleteFunc: func(obj any) { controllerutil.Enqueue(obj, cfg.ServiceWorkqueue) }, }) - controller.enqueueRollout = func(obj interface{}) { + controller.enqueueRollout = func(obj any) { controllerutil.EnqueueRateLimited(obj, cfg.RolloutWorkqueue) } diff --git a/service/service_test.go b/service/service_test.go index 69dea462da..35722443ac 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -86,7 +86,7 @@ func newFakeServiceController(svc *corev1.Service, rollout *v1alpha1.Rollout) (* MetricsServer: metricsServer, }) enqueuedObjects := map[string]int{} - c.enqueueRollout = func(obj interface{}) { + c.enqueueRollout = 
func(obj any) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { diff --git a/test/e2e/apisix_test.go b/test/e2e/apisix_test.go index 1c8d2de3a3..dfdea9ce60 100644 --- a/test/e2e/apisix_test.go +++ b/test/e2e/apisix_test.go @@ -4,13 +4,14 @@ package e2e import ( + "testing" + "time" + a6 "github.com/argoproj/argo-rollouts/rollout/trafficrouting/apisix" "github.com/argoproj/argo-rollouts/test/fixtures" "github.com/stretchr/testify/suite" "github.com/tj/assert" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "testing" - "time" ) const ( @@ -132,7 +133,7 @@ func (s *APISIXSuite) check(t *fixtures.Then, stableWeight int64, canaryWeight i assert.NoError(s.T(), err) for _, backend := range backends { - typedBackend, ok := backend.(map[string]interface{}) + typedBackend, ok := backend.(map[string]any) assert.Equal(s.T(), ok, true) nameOfCurrentBackend, isFound, err := unstructured.NestedString(typedBackend, "serviceName") assert.NoError(s.T(), err) @@ -165,14 +166,14 @@ func (s *APISIXSuite) checkSetHeader(t *fixtures.Then, stableWeight int64, canar apisixHttpRouteObj, err := a6.GetHttpRoute(apisixHttpRoutesObj, apisixRouteName) assert.NoError(s.T(), err) - exprs, isFound, err := unstructured.NestedSlice(apisixHttpRouteObj.(map[string]interface{}), "match", "exprs") + exprs, isFound, err := unstructured.NestedSlice(apisixHttpRouteObj.(map[string]any), "match", "exprs") assert.NoError(s.T(), err) assert.Equal(s.T(), isFound, true) assert.Equal(s.T(), 1, len(exprs)) expr := exprs[0] - exprObj, ok := expr.(map[string]interface{}) + exprObj, ok := expr.(map[string]any) assert.Equal(s.T(), ok, true) op, isFound, err := unstructured.NestedString(exprObj, "op") diff --git a/test/e2e/appmesh_test.go b/test/e2e/appmesh_test.go index c2ab47f290..4d173e8c56 100644 --- a/test/e2e/appmesh_test.go +++ b/test/e2e/appmesh_test.go @@ -1,3 +1,4 @@ +//go:build e2e // +build e2e package e2e @@ -71,12 +72,12 @@ func (s *AppMeshSuite) 
getWeightedTargets(uVr *unstructured.Unstructured) map[st result := make(map[string]weightedTargets) routesI, _, _ := unstructured.NestedSlice(uVr.Object, "spec", "routes") for _, rI := range routesI { - route, _ := rI.(map[string]interface{}) + route, _ := rI.(map[string]any) routeName, _ := route["name"].(string) wtsI, _, _ := unstructured.NestedSlice(route, "httpRoute", "action", "weightedTargets") wtStruct := weightedTargets{} for _, wtI := range wtsI { - wt, _ := wtI.(map[string]interface{}) + wt, _ := wtI.(map[string]any) vnodeName, _, _ := unstructured.NestedString(wt, "virtualNodeRef", "name") weight, _, _ := unstructured.NestedInt64(wt, "weight") fmt.Printf("Found wt %+v with vnodeName (%s), weight (%d)", wt, vnodeName, weight) diff --git a/test/e2e/aws_test.go b/test/e2e/aws_test.go index f8b0553fe2..fcbb1a2ef5 100644 --- a/test/e2e/aws_test.go +++ b/test/e2e/aws_test.go @@ -94,6 +94,71 @@ func (s *AWSSuite) TestALBPingPongUpdate() { Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)) } +// Rollout uses both alb and mesh for trafficRouting. +// also uses both pingpong service and stable/canary services +// Expecting: * alb is using pingpong +// - mesh is using stable/canary +func (s *AWSSuite) TestALBMesh_PingPong_StableCanary_Update() { + s.Given(). + RolloutObjects("@functional/albmesh-pingpong-stablecanary-rollout.yaml"). + When().ApplyManifests().WaitForRolloutStatus("Healthy"). + Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) + assert.Equal(s.T(), "stable-service", vsvc.Spec.HTTP[0].Route[0].Destination.Host) + assert.Equal(s.T(), "canary-service", vsvc.Spec.HTTP[0].Route[1].Destination.Host) + }). + // Update 1. Test the weight switch from ping => pong + When().UpdateSpec(). 
+ WaitForRolloutCanaryStepIndex(1).Sleep(1 * time.Second).Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 75, 25)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(75), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(25), vsvc.Spec.HTTP[0].Route[1].Weight) + assert.Equal(s.T(), "stable-service", vsvc.Spec.HTTP[0].Route[0].Destination.Host) + assert.Equal(s.T(), "canary-service", vsvc.Spec.HTTP[0].Route[1].Destination.Host) + }). + When().PromoteRollout(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 0, 100)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) + assert.Equal(s.T(), "stable-service", vsvc.Spec.HTTP[0].Route[0].Destination.Host) + assert.Equal(s.T(), "canary-service", vsvc.Spec.HTTP[0].Route[1].Destination.Host) + }). + // Update 2. Test the weight switch from pong => ping + When().UpdateSpec(). + WaitForRolloutCanaryStepIndex(1).Sleep(1 * time.Second).Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 25, 75)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(75), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(25), vsvc.Spec.HTTP[0].Route[1].Weight) + assert.Equal(s.T(), "stable-service", vsvc.Spec.HTTP[0].Route[0].Destination.Host) + assert.Equal(s.T(), "canary-service", vsvc.Spec.HTTP[0].Route[1].Destination.Host) + }). + When().PromoteRollout(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)). 
+ Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) + assert.Equal(s.T(), "stable-service", vsvc.Spec.HTTP[0].Route[0].Destination.Host) + assert.Equal(s.T(), "canary-service", vsvc.Spec.HTTP[0].Route[1].Destination.Host) + }) +} + func (s *AWSSuite) TestALBPingPongUpdateMultiIngress() { s.Given(). RolloutObjects("@functional/alb-pingpong-multi-ingress-rollout.yaml"). @@ -239,6 +304,7 @@ func (s *AWSSuite) TestALBExperimentStepMultiIngress() { } func (s *AWSSuite) TestALBExperimentStepNoSetWeight() { + //TODO: this test is flaky s.Given(). RolloutObjects("@alb/rollout-alb-experiment-no-setweight.yaml"). When(). @@ -272,6 +338,7 @@ func (s *AWSSuite) TestALBExperimentStepNoSetWeight() { } func (s *AWSSuite) TestALBExperimentStepNoSetWeightMultiIngress() { + //TODO: this test is flaky s.Given(). RolloutObjects("@alb/rollout-alb-multi-ingress-experiment-no-setweight.yaml"). When(). diff --git a/test/e2e/canary_test.go b/test/e2e/canary_test.go index bc5e60b6c3..8656aa5c4d 100644 --- a/test/e2e/canary_test.go +++ b/test/e2e/canary_test.go @@ -149,7 +149,10 @@ spec: spec: containers: - name: updatescaling - command: [/bad-command]`). + resources: + requests: + memory: 16Mi + cpu: 2m`). WaitForRolloutReplicas(7). Then(). ExpectCanaryStablePodCount(4, 3). @@ -465,6 +468,31 @@ spec: WaitForRolloutStatus("Healthy") } +// TestCanaryScaleDownDelayWithProgressDeadline verifies that a rollout with a pending scale down doesn't trigger a ProgressDeadlineExceeded event and renders the rollout degraded +func (s *CanarySuite) TestCanaryScaleDownDelayWithProgressDeadline() { + s.Given(). + HealthyRollout(`@functional/canary-scaledowndelay.yaml`). + When(). + UpdateSpec(` +spec: + progressDeadlineSeconds: 5`). + Then(). + When(). + UpdateSpec(` +spec: + template: + metadata: + annotations: + rev: two`). 
// update to revision 2 + WaitForRolloutStatus("Healthy"). + Sleep(10 * time.Second). // sleep > progressDeadlineSeconds + Then(). + Assert(func(t *fixtures.Then) { + status := string(t.GetRollout().Status.Phase) + assert.Equal(s.T(), "Healthy", status) + }) +} + // TestCanaryScaleDownDelay verifies canary uses a scaleDownDelay when traffic routing is used, // and verifies the annotation is properly managed func (s *CanarySuite) TestCanaryScaleDownDelay() { @@ -565,6 +593,23 @@ func (s *CanarySuite) TestCanaryScaleDownOnAbortNoTrafficRouting() { ExpectRevisionPodCount("2", 0) } +func (s *CanarySuite) TestCanaryWithPausedRollout() { + (s.Given(). + HealthyRollout(`@functional/rollout-canary-with-pause.yaml`). + When(). + ApplyManifests(). + MarkPodsReady("1", 3). // mark all 3 pods ready + WaitForRolloutStatus("Healthy"). + UpdateSpec(). // update to revision 2 + WaitForRolloutStatus("Paused"). + UpdateSpec(). // update to revision 3 + WaitForRolloutStatus("Paused"). + Then(). + ExpectRevisionPodCount("1", 3). + ExpectRevisionPodCount("2", 0). + ExpectRevisionPodCount("3", 1)) +} + func (s *CanarySuite) TestCanaryUnScaleDownOnAbort() { s.Given(). HealthyRollout(`@functional/canary-unscaledownonabort.yaml`). @@ -616,6 +661,7 @@ func (s *CanarySuite) TestCanaryDynamicStableScale() { When(). MarkPodsReady("1", 1). // mark last remaining stable pod as ready (4/4 stable are ready) WaitForRevisionPodCount("2", 0). + Sleep(2*time.Second). //WaitForRevisionPodCount does not wait for terminating pods and so ExpectServiceSelector fails sleep a bit for the terminating pods to be deleted Then(). // Expect that the canary service selector is now set to stable because of dynamic stable scale is over and we have all pods up on stable rs ExpectServiceSelector("dynamic-stable-scale-canary", map[string]string{"app": "dynamic-stable-scale", "rollouts-pod-template-hash": "868d98995b"}, false). 
diff --git a/test/e2e/functional/albmesh-pingpong-stablecanary-rollout.yaml b/test/e2e/functional/albmesh-pingpong-stablecanary-rollout.yaml new file mode 100644 index 0000000000..0476b1076e --- /dev/null +++ b/test/e2e/functional/albmesh-pingpong-stablecanary-rollout.yaml @@ -0,0 +1,139 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ping-service +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-canary +--- +apiVersion: v1 +kind: Service +metadata: + name: pong-service +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-canary +--- +--- +apiVersion: v1 +kind: Service +metadata: + name: stable-service +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-canary +--- +apiVersion: v1 +kind: Service +metadata: + name: canary-service +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: alb-canary +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: alb-canary-ingress + annotations: + kubernetes.io/ingress.class: alb +spec: + rules: + - http: + paths: + - path: /* + backend: + service: + name: alb-rollout-root + port: + name: use-annotation + pathType: ImplementationSpecific +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: alb-canary +spec: + replicas: 2 + selector: + matchLabels: + app: alb-canary + template: + metadata: + labels: + app: alb-canary + spec: + containers: + - name: alb-canary + image: "argoproj/rollouts-demo:red" + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m + strategy: + canary: + scaleDownDelaySeconds: 2 + canaryService: canary-service + stableService: stable-service + pingPong: + pingService: ping-service + pongService: pong-service + trafficRouting: + alb: + ingress: 
alb-canary-ingress + rootService: alb-rollout-root + servicePort: 80 + istio: + virtualService: + name: istio-host-split-vsvc + routes: + - primary + steps: + - setWeight: 25 + - pause: {} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split + http: + - name: primary + route: + - destination: + host: stable-service + weight: 100 + - destination: + host: canary-service + weight: 0 diff --git a/test/e2e/functional/analysistemplate-sleep-job.yaml b/test/e2e/functional/analysistemplate-sleep-job.yaml index 86faa2c877..4fdd369dee 100644 --- a/test/e2e/functional/analysistemplate-sleep-job.yaml +++ b/test/e2e/functional/analysistemplate-sleep-job.yaml @@ -6,7 +6,7 @@ metadata: spec: args: - name: duration - value: 0s + value: "0" - name: exit-code value: "0" - name: count diff --git a/test/e2e/functional/rollout-canary-with-pause.yaml b/test/e2e/functional/rollout-canary-with-pause.yaml new file mode 100644 index 0000000000..b92ec9e1c1 --- /dev/null +++ b/test/e2e/functional/rollout-canary-with-pause.yaml @@ -0,0 +1,77 @@ +apiVersion: v1 +kind: Service +metadata: + name: rollout-canary-with-pause-root +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-canary-with-pause +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-canary-with-pause-canary +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-canary-with-pause +--- +apiVersion: v1 +kind: Service +metadata: + name: rollout-canary-with-pause-stable +spec: + type: NodePort + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: rollout-canary-with-pause +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-canary-with-pause +spec: + replicas: 3 + revisionHistoryLimit: 3 + progressDeadlineSeconds: 5 + selector: + 
matchLabels: + app: rollout-canary-with-pause + template: + metadata: + labels: + app: rollout-canary-with-pause + spec: + containers: + - name: rollouts-demo + image: nginx:1.19-alpine + ports: + - containerPort: 80 + readinessProbe: + initialDelaySeconds: 10 + httpGet: + path: / + port: 80 + periodSeconds: 30 + strategy: + canary: + canaryService: rollout-canary-with-pause-canary + stableService: rollout-canary-with-pause-stable + steps: + - setWeight: 20 + - pause: {} + \ No newline at end of file diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index d4b27d3e3f..668c75bb0c 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -166,13 +166,14 @@ spec: UpdateSpec(). WaitForRolloutStatus("Paused"). // At step 1 (pause: {duration: 24h}) PromoteRollout(). - Sleep(2*time.Second). + Sleep(3*time.Second). + WaitForInlineAnalysisRunPhase("Running"). Then(). + ExpectRolloutStatus("Progressing"). // At step 2 (analysis: sleep-job - 24h) + ExpectAnalysisRunCount(1). ExpectRollout("status.currentStepIndex == 1", func(r *v1alpha1.Rollout) bool { return *r.Status.CurrentStepIndex == 1 }). - ExpectRolloutStatus("Progressing"). // At step 2 (analysis: sleep-job - 24h) - ExpectAnalysisRunCount(1). When(). PromoteRollout(). Sleep(2 * time.Second). @@ -205,6 +206,9 @@ spec: prePromotionAnalysis: templates: - templateName: sleep-job + args: + - name: duration + value: "10" postPromotionAnalysis: templates: - templateName: sleep-job @@ -228,11 +232,12 @@ spec: ApplyManifests(). WaitForRolloutStatus("Healthy"). UpdateSpec(). - Sleep(time.Second). + Sleep(5 * time.Second). + WaitForPrePromotionAnalysisRunPhase("Running"). PromoteRolloutFull(). WaitForRolloutStatus("Healthy"). Then(). 
- ExpectAnalysisRunCount(0) + ExpectAnalysisRunCount(1) } func (s *FunctionalSuite) TestRolloutRestart() { @@ -1313,7 +1318,7 @@ spec: if err != nil { return err } - containers[0] = map[string]interface{}{ + containers[0] = map[string]any{ "name": "rollouts-demo", "image": "argoproj/rollouts-demo:error", } @@ -1333,7 +1338,7 @@ spec: if err != nil { return err } - containers[0] = map[string]interface{}{ + containers[0] = map[string]any{ "name": "rollouts-demo", "image": "argoproj/rollouts-demo:blue", } @@ -1443,3 +1448,170 @@ spec: Then(). ExpectRolloutStatus("Healthy")) } + +func (s *FunctionalSuite) TestScaleDownOnSuccess() { + s.Given(). + RolloutObjects(` +kind: Service +apiVersion: v1 +metadata: + name: rollout-bluegreen-active +spec: + selector: + app: rollout-ref-deployment + ports: + - protocol: TCP + port: 80 + targetPort: 8080 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-ref-deployment +spec: + replicas: 2 + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment + scaleDown: onsuccess + strategy: + blueGreen: + activeService: rollout-bluegreen-active +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rollout-ref-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: rollout-ref-deployment + template: + metadata: + labels: + app: rollout-ref-deployment + spec: + containers: + - name: rollouts-demo + image: argoproj/rollouts-demo:green +`). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Then(). + ExpectDeploymentReplicasCount("The deployment has been scaled to 0 replicas", "rollout-ref-deployment", 0) +} + +func (s *FunctionalSuite) TestScaleDownProgressively() { + s.Given(). 
+ RolloutObjects(` +kind: Service +apiVersion: v1 +metadata: + name: rollout-bluegreen-active +spec: + selector: + app: rollout-ref-deployment + ports: + - protocol: TCP + port: 80 + targetPort: 8080 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-ref-deployment +spec: + replicas: 2 + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment + scaleDown: progressively + strategy: + blueGreen: + activeService: rollout-bluegreen-active +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rollout-ref-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: rollout-ref-deployment + template: + metadata: + labels: + app: rollout-ref-deployment + spec: + containers: + - name: rollouts-demo + image: argoproj/rollouts-demo:green +`). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + Then(). + ExpectDeploymentReplicasCount("The deployment has been scaled to 0 replicas", "rollout-ref-deployment", 0) +} + +func (s *FunctionalSuite) TestNeverScaleDown() { + s.Given(). + RolloutObjects(` +kind: Service +apiVersion: v1 +metadata: + name: rollout-bluegreen-active + annotations: + rollout.argoproj.io/scale-down: never +spec: + selector: + app: rollout-ref-deployment + ports: + - protocol: TCP + port: 80 + targetPort: 8080 +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: rollout-ref-deployment +spec: + replicas: 2 + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: rollout-ref-deployment + scaleDown: never + strategy: + blueGreen: + activeService: rollout-bluegreen-active +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rollout-ref-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: rollout-ref-deployment + template: + metadata: + labels: + app: rollout-ref-deployment + spec: + containers: + - name: rollouts-demo + image: argoproj/rollouts-demo:green +`). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). 
+ Then(). + ExpectDeploymentReplicasCount("The deployment has not been scaled", "rollout-ref-deployment", 2) +} diff --git a/test/e2e/istio/istio-host-split-ping-pong.yaml b/test/e2e/istio/istio-host-split-ping-pong.yaml new file mode 100644 index 0000000000..b9eb38ad37 --- /dev/null +++ b/test/e2e/istio/istio-host-split-ping-pong.yaml @@ -0,0 +1,84 @@ +apiVersion: v1 +kind: Service +metadata: + name: pong +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: v1 +kind: Service +metadata: + name: ping +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-host-split + +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: istio-host-split-vsvc +spec: + hosts: + - istio-host-split + http: + - name: primary + route: + - destination: + host: ping + weight: 100 + - destination: + host: pong + weight: 0 + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-host-split +spec: + strategy: + canary: + pingPong: + pingService: ping + pongService: pong + trafficRouting: + istio: + virtualService: + name: istio-host-split-vsvc + routes: + - primary + steps: + - setWeight: 25 + - pause: { duration: 5s } + selector: + matchLabels: + app: istio-host-split + template: + metadata: + labels: + app: istio-host-split + spec: + containers: + - name: istio-host-split + image: nginx:1.19-alpine + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m diff --git a/test/e2e/istio/istio-subset-split-in-stable-downscale-after-canary-abort.yaml b/test/e2e/istio/istio-subset-split-in-stable-downscale-after-canary-abort.yaml new file mode 100644 index 0000000000..2a67c5b5a9 --- /dev/null +++ b/test/e2e/istio/istio-subset-split-in-stable-downscale-after-canary-abort.yaml @@ -0,0 +1,101 @@ +apiVersion: v1 +kind: Service +metadata: + name: 
istio-subset-split-in-stable-downscale-after-canary-abort +spec: + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app: istio-subset-split-in-stable-downscale-after-canary-abort + +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: istio-subset-split-in-stable-downscale-after-canary-abort-vsvc +spec: + hosts: + - istio-subset-split-in-stable-downscale-after-canary-abort + http: + - route: + - destination: + host: istio-subset-split-in-stable-downscale-after-canary-abort + subset: stable + weight: 100 + - destination: + host: istio-subset-split-in-stable-downscale-after-canary-abort + subset: canary + weight: 0 + +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: istio-subset-split-in-stable-downscale-after-canary-abort-destrule +spec: + host: istio-subset-split-in-stable-downscale-after-canary-abort + subsets: + - name: stable + labels: + app: istio-subset-split-in-stable-downscale-after-canary-abort + - name: canary + labels: + app: istio-subset-split-in-stable-downscale-after-canary-abort + +--- +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: istio-subset-split-in-stable-downscale-after-canary-abort +spec: + replicas: 4 + strategy: + canary: + dynamicStableScale: true + trafficRouting: + istio: + virtualService: + name: istio-subset-split-in-stable-downscale-after-canary-abort-vsvc + destinationRule: + name: istio-subset-split-in-stable-downscale-after-canary-abort-destrule + canarySubsetName: canary + stableSubsetName: stable + canaryMetadata: + labels: + role: canary + stableMetadata: + labels: + role: stable + maxSurge: "25%" + maxUnavailable: "20%" + steps: + - setWeight: 10 + - pause: {} + - setWeight: 20 + - pause: {} + - setWeight: 30 + - pause: {} + - setWeight: 50 + - pause: {} + selector: + matchLabels: + app: istio-subset-split-in-stable-downscale-after-canary-abort + template: + metadata: + labels: + app: 
istio-subset-split-in-stable-downscale-after-canary-abort + spec: + containers: + - name: istio-subset-split-in-stable-downscale-after-canary-abort + image: nginx:1.19-alpine + ports: + - name: http + containerPort: 80 + protocol: TCP + resources: + requests: + memory: 16Mi + cpu: 5m diff --git a/test/e2e/istio_test.go b/test/e2e/istio_test.go index 7ecbd66fdf..9797b3e503 100644 --- a/test/e2e/istio_test.go +++ b/test/e2e/istio_test.go @@ -492,3 +492,80 @@ func (s *IstioSuite) TestIstioSubsetSplitExperimentStep() { s.TearDownSuite() } + +func (s *IstioSuite) TestIstioPingPongUpdate() { + s.Given(). + RolloutObjects("@istio/istio-host-split-ping-pong.yaml"). + When().ApplyManifests().WaitForRolloutStatus("Healthy"). + Then(). + //Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) + }). + // Update 1. Test the weight switch from ping => pong + When().UpdateSpec(). + WaitForRolloutCanaryStepIndex(1).Sleep(1 * time.Second).Then(). + //Assert(assertWeights(s, "ping-service", "pong-service", 75, 25)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(75), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(25), vsvc.Spec.HTTP[0].Route[1].Weight) + }). + When().PromoteRollout(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + //Assert(assertWeights(s, "ping-service", "pong-service", 0, 100)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[1].Weight) + }). + // Update 2. Test the weight switch from pong => ping + When().UpdateSpec(). + WaitForRolloutCanaryStepIndex(1).Sleep(1 * time.Second).Then(). 
+ //Assert(assertWeights(s, "ping-service", "pong-service", 25, 75)). + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(25), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(75), vsvc.Spec.HTTP[0].Route[1].Weight) + }). + When().PromoteRollout(). + WaitForRolloutStatus("Healthy"). + Sleep(1 * time.Second). + Then(). + //Assert(assertWeights(s, "ping-service", "pong-service", 100, 0)) + Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + assert.Equal(s.T(), int64(100), vsvc.Spec.HTTP[0].Route[0].Weight) + assert.Equal(s.T(), int64(0), vsvc.Spec.HTTP[0].Route[1].Weight) + }) +} + +func (s *IstioSuite) TestIstioSubsetSplitInStableDownscaleAfterCanaryAbort() { + s.Given(). + RolloutObjects("@istio/istio-subset-split-in-stable-downscale-after-canary-abort.yaml"). + When(). + ApplyManifests(). + WaitForRolloutStatus("Healthy"). + PromoteRolloutFull(). + UpdateSpec(). + WaitForRolloutStatus("Paused"). + AbortRollout(). + WaitForRolloutStatus("Degraded"). + ScaleRollout(1). + WaitForRolloutReplicas(1). + Then(). 
+ Assert(func(t *fixtures.Then) { + vsvc := t.GetVirtualService() + stableWeight := vsvc.Spec.HTTP[0].Route[0].Weight + canaryWeight := vsvc.Spec.HTTP[0].Route[1].Weight + + assert.Equal(s.T(), int64(0), canaryWeight) + assert.Equal(s.T(), int64(100), stableWeight) + }) + + s.TearDownSuite() +} diff --git a/test/fixtures/e2e_suite.go b/test/fixtures/e2e_suite.go index a774afcf94..e78ae1aa68 100644 --- a/test/fixtures/e2e_suite.go +++ b/test/fixtures/e2e_suite.go @@ -54,7 +54,7 @@ const ( ) var ( - E2EWaitTimeout time.Duration = time.Second * 120 + E2EWaitTimeout time.Duration = time.Second * 90 E2EPodDelay = 0 E2EALBIngressAnnotations map[string]string @@ -143,8 +143,8 @@ func (s *E2ESuite) SetupSuite() { restConfig, err := config.ClientConfig() s.CheckError(err) s.Common.kubernetesHost = restConfig.Host - restConfig.Burst = defaults.DefaultBurst * 2 - restConfig.QPS = defaults.DefaultQPS * 2 + restConfig.Burst = defaults.DefaultBurst * 10 + restConfig.QPS = defaults.DefaultQPS * 10 s.namespace, _, err = config.Namespace() s.CheckError(err) s.kubeClient, err = kubernetes.NewForConfig(restConfig) diff --git a/test/fixtures/given.go b/test/fixtures/given.go index 19e7552f60..57e17251c6 100644 --- a/test/fixtures/given.go +++ b/test/fixtures/given.go @@ -44,7 +44,7 @@ func (g *Given) SetSteps(text string) *Given { steps := make([]rov1.CanaryStep, 0) err := yaml.Unmarshal([]byte(text), &steps) g.CheckError(err) - var stepsUn []interface{} + var stepsUn []any for _, step := range steps { stepUn, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&step) g.CheckError(err) diff --git a/test/fixtures/then.go b/test/fixtures/then.go index eac4c9419b..8183cc2adc 100644 --- a/test/fixtures/then.go +++ b/test/fixtures/then.go @@ -11,6 +11,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" rov1 
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -54,7 +55,7 @@ func (t *Then) ExpectRolloutStatus(expectedStatus string) *Then { return t } -func (t *Then) ExpectReplicaCounts(desired, current, updated, ready, available interface{}) *Then { +func (t *Then) ExpectReplicaCounts(desired, current, updated, ready, available any) *Then { ro, err := t.rolloutClient.ArgoprojV1alpha1().Rollouts(t.namespace).Get(t.Context, t.rollout.GetName(), metav1.GetOptions{}) t.CheckError(err) if desired != nil && desired.(int) != int(defaults.GetReplicasOrDefault(ro.Spec.Replicas)) { @@ -504,3 +505,30 @@ func (t *Then) Given() *Given { Common: t.Common, } } + +type DeploymentExpectation func(*appsv1.Deployment) bool + +func (t *Then) ExpectDeploymentReplicasCount(expectation string, deploymentName string, expectedReplicaCount int) *Then { + t.t.Helper() + checkDeploymentReplicas := func() (done bool, err error) { + deployment, err := t.kubeClient.AppsV1().Deployments(t.namespace).Get(t.Context, deploymentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + if deployment.Spec.Replicas != nil && *deployment.Spec.Replicas == int32(expectedReplicaCount) { + t.log.Infof("Deployment replicas count expectation '%s' met", expectation) + return true, nil + } + t.log.Errorf("Deployment replicas count expectation '%s' failed. 
Expected: %d, Actual: %d", expectation, expectedReplicaCount, *deployment.Spec.Replicas) + return false, nil + } + + pollInterval := 5 * time.Second + pollTimeout := 1 * time.Minute + if err := wait.PollImmediate(pollInterval, pollTimeout, checkDeploymentReplicas); err != nil { + t.log.Errorf("Failed to meet deployment replicas count expectation '%s'", expectation) + t.t.FailNow() + } + + return t +} diff --git a/test/fixtures/when.go b/test/fixtures/when.go index 0fa77610f4..d9696a7761 100644 --- a/test/fixtures/when.go +++ b/test/fixtures/when.go @@ -90,7 +90,7 @@ func (w *When) injectDelays(un *unstructured.Unstructured) { w.CheckError(err) containersIf, _, err := unstructured.NestedSlice(un.Object, "spec", "template", "spec", "containers") w.CheckError(err) - container := containersIf[0].(map[string]interface{}) + container := containersIf[0].(map[string]any) container["lifecycle"] = lifecycleObj containersIf[0] = container err = unstructured.SetNestedSlice(un.Object, containersIf, "spec", "template", "spec", "containers") @@ -105,7 +105,7 @@ func (w *When) injectImagePrefix(un *unstructured.Unstructured) { } containersIf, _, err := unstructured.NestedSlice(un.Object, "spec", "template", "spec", "containers") w.CheckError(err) - container := containersIf[0].(map[string]interface{}) + container := containersIf[0].(map[string]any) container["image"] = imagePrefix + container["image"].(string) containersIf[0] = container err = unstructured.SetNestedSlice(un.Object, containersIf, "spec", "template", "spec", "containers") @@ -245,7 +245,7 @@ func (w *When) PatchSpec(patch string) *When { w.t.Fatal("Rollout not set") } // convert YAML patch to JSON patch - var patchObj map[string]interface{} + var patchObj map[string]any err := yaml.Unmarshal([]byte(patch), &patchObj) w.CheckError(err) jsonPatch, err := json.Marshal(patchObj) diff --git a/test/kustomize/rollout/kustomization.yaml b/test/kustomize/rollout/kustomization.yaml index 71a3660e3c..6f451758d9 100644 --- 
a/test/kustomize/rollout/kustomization.yaml +++ b/test/kustomize/rollout/kustomization.yaml @@ -45,15 +45,15 @@ images: openapi: path: https://raw.githubusercontent.com/argoproj/argo-schema-generator/main/schema/argo_all_k8s_kustomize_schema.json -patchesStrategicMerge: -- |- - apiVersion: argoproj.io/v1alpha1 - kind: Rollout - metadata: - name: guestbook - spec: - template: - spec: - containers: - - name: guestbook - image: guestbook-patched:v1 +patches: +- patch: |- + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + metadata: + name: guestbook + spec: + template: + spec: + containers: + - name: guestbook + image: guestbook-patched:v1 diff --git a/test/util/util.go b/test/util/util.go index 071aca3f7b..5f5c20d5c7 100644 --- a/test/util/util.go +++ b/test/util/util.go @@ -16,7 +16,7 @@ import ( // ObjectFromYAML returns a runtime.Object from a yaml string func ObjectFromYAML(yamlStr string) *unstructured.Unstructured { - obj := make(map[string]interface{}) + obj := make(map[string]any) err := yaml.Unmarshal([]byte(yamlStr), &obj) if err != nil { panic(err) diff --git a/ui/jest.config.js b/ui/jest.config.js new file mode 100644 index 0000000000..7548de4101 --- /dev/null +++ b/ui/jest.config.js @@ -0,0 +1,8 @@ +module.exports = { + roots: ['/src'], + testMatch: ['**/?(*.)+(spec|test).+(ts|tsx|js)'], + transform: { + '^.+\\.(ts|tsx)$': 'ts-jest', + }, + modulePathIgnorePatterns: ['generated'], +}; diff --git a/ui/package.json b/ui/package.json index e7d8217969..0a631d3773 100644 --- a/ui/package.json +++ b/ui/package.json @@ -4,10 +4,11 @@ "private": true, "dependencies": { "@fortawesome/fontawesome-svg-core": "^6.4.0", + "@fortawesome/free-regular-svg-icons": "^6.4.0", "@fortawesome/free-solid-svg-icons": "^6.4.0", "@fortawesome/react-fontawesome": "^0.2.0", "antd": "^5.4.2", - "argo-ui": "git+https://github.com/argoproj/argo-ui.git", + "argo-ui": "git+https://github.com/argoproj/argo-ui.git#5ff344ac9692c14dd108468bd3c020c3c75181cb", "classnames": "2.2.6", 
"isomorphic-fetch": "^3.0.0", "moment": "^2.29.4", @@ -19,14 +20,15 @@ "react-hot-loader": "^3.1.3", "react-keyhooks": "^0.2.3", "react-router-dom": "5.2.0", + "recharts": "^2.9.0", "rxjs": "^6.6.6", "typescript": "^5.0.4", "web-vitals": "^1.0.1" }, "scripts": { "start": "webpack serve --config ./src/app/webpack.dev.js", - "build": "rm -rf dist && NODE_OPTIONS=--openssl-legacy-provider webpack --config ./src/app/webpack.prod.js", - "test": "react-scripts test", + "build": "rm -rf dist && webpack --config ./src/app/webpack.prod.js", + "test": "jest", "eject": "react-scripts eject", "protogen": "../hack/swagger-codegen.sh generate -i ../pkg/apiclient/rollout/rollout.swagger.json -l typescript-fetch -o src/models/rollout/generated" }, @@ -53,17 +55,22 @@ "@testing-library/react": "^11.1.0", "@testing-library/user-event": "^12.1.10", "@types/classnames": "2.2.9", - "@types/jest": "^26.0.15", + "@types/jest": "^29.5.10", "@types/node": "^12.0.0", "@types/react": "^16.9.3", "@types/react-dom": "^16.9.3", "@types/react-helmet": "^6.1.0", "@types/react-router-dom": "^5.1.7", + "@types/react-form": "^2.16.1", + "@types/uuid": "^9.0.3", + "@types/react-autocomplete": "^1.8.4", "copy-webpack-plugin": "^6.3.2", + "jest": "^29.7.0", "mini-css-extract-plugin": "^1.3.9", "raw-loader": "^4.0.2", "react-scripts": "4.0.3", "sass": "^1.32.8", + "ts-jest": "^29.1.1", "ts-loader": "^8.0.17", "webpack-bundle-analyzer": "^4.4.0", "webpack-cli": "^4.5.0", diff --git a/ui/src/app/App.tsx b/ui/src/app/App.tsx index 60ba5419c6..bcdb94b9df 100644 --- a/ui/src/app/App.tsx +++ b/ui/src/app/App.tsx @@ -7,7 +7,7 @@ import './App.scss'; import {NamespaceContext, RolloutAPI} from './shared/context/api'; import {Modal} from './components/modal/modal'; import {Rollout} from './components/rollout/rollout'; -import {RolloutsList} from './components/rollouts-list/rollouts-list'; +import {RolloutsHome} from './components/rollouts-home/rollouts-home'; import {Shortcut, Shortcuts} from 
'./components/shortcuts/shortcuts'; import {ConfigProvider} from 'antd'; import {theme} from '../config/theme'; @@ -33,7 +33,12 @@ const Page = (props: {path: string; component: React.ReactNode; exact?: boolean; pageHasShortcuts={!!props.shortcuts} showHelp={() => { if (props.shortcuts) { - setShowShortcuts(true); + setShowShortcuts(!showShortcuts); + } + }} + hideHelp={() => { + if (props.shortcuts) { + setShowShortcuts(false); } }} /> @@ -84,7 +89,7 @@ const App = () => { } + component={} shortcuts={[ {key: '/', description: 'Search'}, {key: 'TAB', description: 'Search, navigate search items'}, diff --git a/ui/src/app/components/analysis-modal/analysis-modal.tsx b/ui/src/app/components/analysis-modal/analysis-modal.tsx new file mode 100644 index 0000000000..e5394995da --- /dev/null +++ b/ui/src/app/components/analysis-modal/analysis-modal.tsx @@ -0,0 +1,81 @@ +import * as React from 'react'; +import {Modal, Tabs} from 'antd'; +import {RolloutAnalysisRunInfo} from '../../../models/rollout/generated'; + +import MetricLabel from './metric-label/metric-label'; +import {MetricPanel, SummaryPanel} from './panels'; +import {analysisEndTime, analysisStartTime, getAdjustedMetricPhase, metricStatusLabel, metricSubstatus, transformMetrics} from './transforms'; +import {AnalysisStatus} from './types'; + +import classNames from 'classnames'; +import './styles.scss'; + +const cx = classNames; + +interface AnalysisModalProps { + analysis: RolloutAnalysisRunInfo; + analysisName: string; + images: string[]; + onClose: () => void; + open: boolean; + revision: string; +} + +export const AnalysisModal = ({analysis, analysisName, images, onClose, open, revision}: AnalysisModalProps) => { + const analysisResults = analysis.specAndStatus?.status; + + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + const analysisStart = analysisStartTime(analysis.objectMeta?.creationTimestamp); + const analysisEnd = analysisEndTime(analysisResults?.metricResults ?? 
[]); + + const analysisSubstatus = metricSubstatus( + (analysisResults?.phase ?? AnalysisStatus.Unknown) as AnalysisStatus, + analysisResults?.runSummary.failed ?? 0, + analysisResults?.runSummary.error ?? 0, + analysisResults?.runSummary.inconclusive ?? 0 + ); + const transformedMetrics = transformMetrics(analysis.specAndStatus); + + const adjustedAnalysisStatus = getAdjustedMetricPhase(analysis.status as AnalysisStatus); + + const tabItems = [ + { + label: , + key: 'analysis-summary', + children: ( + + ), + }, + ...Object.values(transformedMetrics) + .sort((a, b) => a.name.localeCompare(b.name)) + .map((metric) => ({ + label: , + key: metric.name, + children: ( + + ), + })), + ]; + + return ( + + + + ); +}; diff --git a/ui/src/app/components/analysis-modal/constants.ts b/ui/src/app/components/analysis-modal/constants.ts new file mode 100644 index 0000000000..c1a35ce0d5 --- /dev/null +++ b/ui/src/app/components/analysis-modal/constants.ts @@ -0,0 +1,15 @@ +import {AnalysisStatus, FunctionalStatus} from './types'; + +export const METRIC_FAILURE_LIMIT_DEFAULT = 0; +export const METRIC_INCONCLUSIVE_LIMIT_DEFAULT = 0; +export const METRIC_CONSECUTIVE_ERROR_LIMIT_DEFAULT = 4; + +export const ANALYSIS_STATUS_THEME_MAP: {[key in AnalysisStatus]: string} = { + Successful: FunctionalStatus.SUCCESS, + Error: FunctionalStatus.WARNING, + Failed: FunctionalStatus.ERROR, + Running: FunctionalStatus.IN_PROGRESS, + Pending: FunctionalStatus.INACTIVE, + Inconclusive: FunctionalStatus.WARNING, + Unknown: FunctionalStatus.INACTIVE, // added by frontend +}; diff --git a/ui/src/app/components/analysis-modal/criteria-list/criteria-list.scss b/ui/src/app/components/analysis-modal/criteria-list/criteria-list.scss new file mode 100644 index 0000000000..8427680d0c --- /dev/null +++ b/ui/src/app/components/analysis-modal/criteria-list/criteria-list.scss @@ -0,0 +1,19 @@ +@import '../theme/theme.scss'; + +.criteria-list { + margin: 0; + padding-left: 0; + list-style-type: none; +} + 
+.icon-pass { + color: $success-foreground; +} + +.icon-fail { + color: $error-foreground; +} + +.icon-pending { + color: $in-progress-foreground; +} diff --git a/ui/src/app/components/analysis-modal/criteria-list/criteria-list.tsx b/ui/src/app/components/analysis-modal/criteria-list/criteria-list.tsx new file mode 100644 index 0000000000..51247b6d22 --- /dev/null +++ b/ui/src/app/components/analysis-modal/criteria-list/criteria-list.tsx @@ -0,0 +1,116 @@ +import * as React from 'react'; +import {Space, Typography} from 'antd'; + +import {AnalysisStatus} from '../types'; +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; + +import {faCheck, faRotateRight, faXmark} from '@fortawesome/free-solid-svg-icons'; + +import classNames from 'classnames'; +import './criteria-list.scss'; + +const {Text} = Typography; + +enum CriterionStatus { + Fail = 'FAIL', + Pass = 'PASS', + InProgress = 'IN_PROGRESS', + Pending = 'PENDING', +} + +const defaultCriterionStatus = (analysisStatus: AnalysisStatus) => (analysisStatus === AnalysisStatus.Pending ? CriterionStatus.Pending : CriterionStatus.InProgress); + +const criterionLabel = (measurementLabel: string, maxAllowed: number) => (maxAllowed === 0 ? `No ${measurementLabel}.` : `Fewer than ${maxAllowed + 1} ${measurementLabel}.`); + +interface CriteriaListItemProps { + children: React.ReactNode; + showIcon: boolean; + status: CriterionStatus; +} + +const CriteriaListItem = ({children, showIcon, status}: CriteriaListItemProps) => { + let StatusIcon: React.ReactNode | null = null; + switch (status) { + case CriterionStatus.Fail: { + StatusIcon = ; + break; + } + case CriterionStatus.Pass: { + StatusIcon = ; + break; + } + case CriterionStatus.InProgress: { + StatusIcon = ; + break; + } + case CriterionStatus.Pending: + default: { + break; + } + } + + return ( +
  • + + {showIcon && <>{StatusIcon}} + {children} + +
  • + ); +}; + +interface CriteriaListProps { + analysisStatus: AnalysisStatus; + className?: string[] | string; + consecutiveErrors: number; + failures: number; + inconclusives: number; + maxConsecutiveErrors: number; + maxFailures: number; + maxInconclusives: number; + showIcons: boolean; +} + +const CriteriaList = ({ + analysisStatus, + className, + consecutiveErrors, + failures, + inconclusives, + maxConsecutiveErrors, + maxFailures, + maxInconclusives, + showIcons, +}: CriteriaListProps) => { + let failureStatus = defaultCriterionStatus(analysisStatus); + let errorStatus = defaultCriterionStatus(analysisStatus); + let inconclusiveStatus = defaultCriterionStatus(analysisStatus); + + if (analysisStatus !== AnalysisStatus.Pending && analysisStatus !== AnalysisStatus.Running) { + failureStatus = failures <= maxFailures ? CriterionStatus.Pass : CriterionStatus.Fail; + errorStatus = consecutiveErrors <= maxConsecutiveErrors ? CriterionStatus.Pass : CriterionStatus.Fail; + inconclusiveStatus = inconclusives <= maxInconclusives ? CriterionStatus.Pass : CriterionStatus.Fail; + } + + return ( +
      + {maxFailures > -1 && ( + + {criterionLabel('measurement failures', maxFailures)} + + )} + {maxConsecutiveErrors > -1 && ( + + {criterionLabel('consecutive measurement errors', maxConsecutiveErrors)} + + )} + {maxInconclusives > -1 && ( + + {criterionLabel('inconclusive measurements', maxInconclusives)} + + )} +
    + ); +}; + +export default CriteriaList; diff --git a/ui/src/app/components/analysis-modal/header/header.scss b/ui/src/app/components/analysis-modal/header/header.scss new file mode 100644 index 0000000000..36909a0e71 --- /dev/null +++ b/ui/src/app/components/analysis-modal/header/header.scss @@ -0,0 +1,7 @@ +.icon { + font-size: 14px; +} + +h4.title { + margin: 0; // antd override +} diff --git a/ui/src/app/components/analysis-modal/header/header.tsx b/ui/src/app/components/analysis-modal/header/header.tsx new file mode 100644 index 0000000000..e329daeda2 --- /dev/null +++ b/ui/src/app/components/analysis-modal/header/header.tsx @@ -0,0 +1,38 @@ +import * as React from 'react'; + +import {Space, Typography} from 'antd'; +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; +import {faMagnifyingGlassChart} from '@fortawesome/free-solid-svg-icons'; + +import StatusIndicator from '../status-indicator/status-indicator'; +import {AnalysisStatus, FunctionalStatus} from '../types'; + +import classNames from 'classnames/bind'; +import './header.scss'; + +const {Text, Title} = Typography; +const cx = classNames; + +interface HeaderProps { + className?: string[] | string; + status: AnalysisStatus; + substatus?: FunctionalStatus.ERROR | FunctionalStatus.WARNING; + subtitle?: string; + title: string; +} + +const Header = ({className, status, substatus, subtitle, title}: HeaderProps) => ( + + + + +
    + + {title} + + {subtitle && {subtitle}} +
    +
    +); + +export default Header; diff --git a/ui/src/app/components/analysis-modal/legend/legend.tsx b/ui/src/app/components/analysis-modal/legend/legend.tsx new file mode 100644 index 0000000000..e861c8cf75 --- /dev/null +++ b/ui/src/app/components/analysis-modal/legend/legend.tsx @@ -0,0 +1,44 @@ +import * as React from 'react'; + +import {Space, Typography} from 'antd'; + +import {AnalysisStatus} from '../types'; +import StatusIndicator from '../status-indicator/status-indicator'; + +import classNames from 'classnames'; + +const {Text} = Typography; + +interface LegendItemProps { + label: string; + status: AnalysisStatus; +} + +const LegendItem = ({label, status}: LegendItemProps) => ( + + + {label} + +); + +const pluralize = (count: number, singular: string, plural: string) => (count === 1 ? singular : plural); + +interface LegendProps { + className?: string[] | string; + errors: number; + failures: number; + inconclusives: number; + successes: number; +} + +const Legend = ({className, errors, failures, inconclusives, successes}: LegendProps) => ( + + + + + {inconclusives > 0 && } + +); + +export default Legend; +export {LegendItem}; diff --git a/ui/src/app/components/analysis-modal/metric-chart/metric-chart.scss b/ui/src/app/components/analysis-modal/metric-chart/metric-chart.scss new file mode 100644 index 0000000000..953b10e06c --- /dev/null +++ b/ui/src/app/components/analysis-modal/metric-chart/metric-chart.scss @@ -0,0 +1,79 @@ +@import '../theme/theme.scss'; + +@mixin chartDot($background, $foreground) { + fill: $background; + stroke: $foreground; +} + +.metric-chart svg { + overflow: visible; +} + +.metric-chart-tooltip { + background: white; + max-width: 350px; + padding: 8px; + box-shadow: $shadow-1; +} + +.metric-chart-tooltip-timestamp { + margin-left: 16px; +} + +.metric-chart-tooltip-status { + display: flex; + + > :first-child { + margin: 4px 4px 0 0; + } +} + +.chart-axis text, +.chart-label > tspan { + stroke: $gray-10; + font-size: 11px; + 
font-family: $font-family-primary; + font-weight: 100; +} + +.dot-ERROR { + @include chartDot($error-background, $error-foreground); +} +.dot-INACTIVE { + @include chartDot($inactive-background, $inactive-foreground); +} +.dot-IN_PROGRESS { + @include chartDot($in-progress-background, $in-progress-foreground); +} +.dot-SUCCESS { + @include chartDot($success-background, $success-foreground); +} +.dot-WARNING { + @include chartDot($warning-background, $warning-foreground); +} + +.chart-line > path { + stroke: $gray-11; +} + +.reference-line { + &.is-ERROR > line { + stroke: $error-foreground; + } + &.is-SUCCESS > line { + stroke: $success-foreground; + } +} + +.reference-area { + > path { + opacity: 0.3; + } + + &.is-ERROR > path { + fill: $error-background; + } + &.is-SUCCESS > path { + fill: $success-background; + } +} diff --git a/ui/src/app/components/analysis-modal/metric-chart/metric-chart.tsx b/ui/src/app/components/analysis-modal/metric-chart/metric-chart.tsx new file mode 100644 index 0000000000..f912acddbf --- /dev/null +++ b/ui/src/app/components/analysis-modal/metric-chart/metric-chart.tsx @@ -0,0 +1,161 @@ +// eslint-disable-file @typescript-eslint/ban-ts-comment +import * as React from 'react'; +import * as moment from 'moment'; +import {CartesianGrid, DotProps, Label, Line, LineChart, ReferenceLine, ResponsiveContainer, Tooltip, TooltipProps, XAxis, YAxis} from 'recharts'; +import {NameType, ValueType} from 'recharts/types/component/DefaultTooltipContent'; +import {Typography} from 'antd'; + +import {AnalysisStatus, FunctionalStatus, TransformedMeasurement} from '../types'; +import {ANALYSIS_STATUS_THEME_MAP} from '../constants'; +import {isValidDate} from '../transforms'; + +import StatusIndicator from '../status-indicator/status-indicator'; + +import classNames from 'classnames/bind'; +import './metric-chart.scss'; + +const {Text} = Typography; +const cx = classNames; + +const CHART_HEIGHT = 254; +const X_AXIS_HEIGHT = 45; + +const 
defaultValueFormatter = (value: number | string | null) => (value === null ? '' : value.toString()); + +const timeTickFormatter = (axisData?: string) => { + if (axisData === undefined || !isValidDate(axisData)) { + return ''; + } + return moment(axisData).format('LT'); +}; + +type MeasurementDotProps = DotProps & { + payload?: { + phase: AnalysisStatus; + startedAt: string; + value: string | null; + }; +}; + +const MeasurementDot = ({cx, cy, payload}: MeasurementDotProps) => ( + +); + +type TooltipContentProps = TooltipProps & { + conditionKeys: string[]; + valueFormatter: (value: number | string | null) => string; +}; + +const TooltipContent = ({active, conditionKeys, payload, valueFormatter}: TooltipContentProps) => { + if (!active || payload?.length === 0 || !payload?.[0].payload) { + return null; + } + + const data = payload[0].payload; + let label; + if (data.phase === AnalysisStatus.Error) { + label = data.message ?? 'Measurement error'; + } else if (conditionKeys.length > 0) { + const sublabels = conditionKeys.map((cKey) => (conditionKeys.length > 1 ? `${valueFormatter(data.chartValue[cKey])} (${cKey})` : valueFormatter(data.chartValue[cKey]))); + label = sublabels.join(' , '); + } else { + label = valueFormatter(data.chartValue); + } + + return ( +
    + + {moment(data.startedAt).format('LTS')} + +
    + + {label} +
    +
    + ); +}; + +interface MetricChartProps { + className?: string[] | string; + conditionKeys: string[]; + data: TransformedMeasurement[]; + failThresholds: number[] | null; + max: number | null; + min: number | null; + successThresholds: number[] | null; + valueFormatter?: (value: number | string | null) => string; + yAxisFormatter?: (value: any, index: number) => string; + yAxisLabel: string; +} + +const MetricChart = ({ + className, + conditionKeys, + data, + failThresholds, + max, + min, + successThresholds, + valueFormatter = defaultValueFormatter, + yAxisFormatter = defaultValueFormatter, + yAxisLabel, +}: MetricChartProps) => { + // show ticks at boundaries of analysis + // @ts-ignore + const startingTick = data[0]?.startedAt ?? ''; + // @ts-ignore + const endingTick = data[data.length - 1]?.finishedAt ?? ''; + const timeTicks: any[] = [startingTick, endingTick]; + + return ( + + + + + + + } filterNull={false} isAnimationActive={true} /> + {failThresholds !== null && ( + <> + {failThresholds.map((threshold) => ( + + ))} + + )} + {successThresholds !== null && ( + <> + {successThresholds.map((threshold) => ( + + ))} + + )} + {conditionKeys.length === 0 ? 
( + } + /> + ) : ( + <> + {conditionKeys.map((cKey) => ( + } /> + ))} + + )} + + + ); +}; + +export default MetricChart; diff --git a/ui/src/app/components/analysis-modal/metric-label/metric-label.scss b/ui/src/app/components/analysis-modal/metric-label/metric-label.scss new file mode 100644 index 0000000000..33b0f965fc --- /dev/null +++ b/ui/src/app/components/analysis-modal/metric-label/metric-label.scss @@ -0,0 +1,9 @@ +.metric-label { + display: block; + width: 140px; + min-width: 140px; + text-align: left; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} diff --git a/ui/src/app/components/analysis-modal/metric-label/metric-label.tsx b/ui/src/app/components/analysis-modal/metric-label/metric-label.tsx new file mode 100644 index 0000000000..5725f17cdc --- /dev/null +++ b/ui/src/app/components/analysis-modal/metric-label/metric-label.tsx @@ -0,0 +1,28 @@ +import * as React from 'react'; + +import {Space} from 'antd'; + +import {AnalysisStatus, FunctionalStatus} from '../types'; +import StatusIndicator from '../status-indicator/status-indicator'; + +import classNames from 'classnames/bind'; +import './metric-label.scss'; + +const cx = classNames; + +interface AnalysisModalProps { + label: string; + status: AnalysisStatus; + substatus?: FunctionalStatus.ERROR | FunctionalStatus.WARNING; +} + +const MetricLabel = ({label, status, substatus}: AnalysisModalProps) => ( + + + + {label} + + +); + +export default MetricLabel; diff --git a/ui/src/app/components/analysis-modal/metric-table/metric-table.scss b/ui/src/app/components/analysis-modal/metric-table/metric-table.scss new file mode 100644 index 0000000000..69a19025bb --- /dev/null +++ b/ui/src/app/components/analysis-modal/metric-table/metric-table.scss @@ -0,0 +1,32 @@ +@import '../theme/theme.scss'; + +.metric-table { + border: 1px solid $gray-4; +} + +.error-message { + font-style: italic; +} + +.condition { + display: flex; + align-items: center; + justify-content: flex-end; + margin-top: 
$space-unit; + font-size: 12px; + + &::before { + content: ''; + display: block; + height: 2px; + width: 12px; + margin-right: $space-unit; + } + + &.is-ERROR::before { + background: $error-foreground; + } + &.is-SUCCESS::before { + background: $success-foreground; + } +} diff --git a/ui/src/app/components/analysis-modal/metric-table/metric-table.tsx b/ui/src/app/components/analysis-modal/metric-table/metric-table.tsx new file mode 100644 index 0000000000..1fdbd1363d --- /dev/null +++ b/ui/src/app/components/analysis-modal/metric-table/metric-table.tsx @@ -0,0 +1,66 @@ +import * as React from 'react'; +import * as moment from 'moment'; +import {Table, Typography} from 'antd'; + +import {AnalysisStatus, TransformedMeasurement, TransformedValueObject} from '../types'; +import StatusIndicator from '../status-indicator/status-indicator'; +import {isValidDate} from '../transforms'; + +import classNames from 'classnames/bind'; +import './metric-table.scss'; + +const {Column} = Table; +const {Text} = Typography; + +const timeColFormatter = (startTime?: string) => (isValidDate(startTime) ? moment(startTime).format('LTS') : ''); + +const isObject = (tValue: TransformedValueObject | number | string | null) => typeof tValue === 'object' && !Array.isArray(tValue) && tValue !== null; + +const columnValueLabel = (value: any, valueKey: string) => (isObject(value) && valueKey in (value as TransformedValueObject) ? (value as TransformedValueObject)[valueKey] : ''); + +interface MetricTableProps { + className?: string[] | string; + conditionKeys: string[]; + data: TransformedMeasurement[]; + failCondition: string | null; + successCondition: string | null; +} + +const MetricTable = ({className, conditionKeys, data, failCondition, successCondition}: MetricTableProps) => ( +
    + + } align='center' /> + {conditionKeys.length > 0 ? ( + <> + {conditionKeys.map((cKey) => ( + { + const isError = columnValue.phase === AnalysisStatus.Error; + const errorMessage = columnValue.message ?? 'Measurement error'; + const label = isError ? errorMessage : columnValueLabel(columnValue.tableValue, cKey); + return {label}; + }} + /> + ))} + + ) : ( + + )} + {timeColFormatter(startedAt)}} /> +
    + {failCondition !== null && ( + + Failure condition: {failCondition} + + )} + {successCondition !== null && ( + + Success condition: {successCondition} + + )} +
    +); + +export default MetricTable; diff --git a/ui/src/app/components/analysis-modal/panels/index.tsx b/ui/src/app/components/analysis-modal/panels/index.tsx new file mode 100644 index 0000000000..5636632889 --- /dev/null +++ b/ui/src/app/components/analysis-modal/panels/index.tsx @@ -0,0 +1,2 @@ +export {default as MetricPanel} from './metric-panel'; +export {default as SummaryPanel} from './summary-panel'; diff --git a/ui/src/app/components/analysis-modal/panels/metric-panel.tsx b/ui/src/app/components/analysis-modal/panels/metric-panel.tsx new file mode 100644 index 0000000000..b1deba1c6f --- /dev/null +++ b/ui/src/app/components/analysis-modal/panels/metric-panel.tsx @@ -0,0 +1,137 @@ +// eslint-disable-file @typescript-eslint/ban-ts-comment +import * as React from 'react'; + +import {Radio, Typography} from 'antd'; +import type {RadioChangeEvent} from 'antd'; +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; +import {faChartLine, faList} from '@fortawesome/free-solid-svg-icons'; + +import Header from '../header/header'; +import CriteriaList from '../criteria-list/criteria-list'; +import Legend from '../legend/legend'; +import MetricChart from '../metric-chart/metric-chart'; +import MetricTable from '../metric-table/metric-table'; +import QueryBox from '../query-box/query-box'; +import {AnalysisStatus, FunctionalStatus, TransformedMetricSpec, TransformedMetricStatus} from '../types'; +import {isFiniteNumber} from '../transforms'; +import {METRIC_CONSECUTIVE_ERROR_LIMIT_DEFAULT, METRIC_FAILURE_LIMIT_DEFAULT, METRIC_INCONCLUSIVE_LIMIT_DEFAULT} from '../constants'; + +import classNames from 'classnames'; +import './styles.scss'; + +const cx = classNames; + +const {Paragraph, Title} = Typography; + +interface MetricPanelProps { + className?: string[] | string; + metricName: string; + metricSpec?: TransformedMetricSpec; + metricResults: TransformedMetricStatus; + status: AnalysisStatus; + substatus?: FunctionalStatus.ERROR | 
FunctionalStatus.WARNING; +} + +const MetricPanel = ({className, metricName, metricSpec, metricResults, status, substatus}: MetricPanelProps) => { + const consecutiveErrorLimit = isFiniteNumber(metricSpec.consecutiveErrorLimit ?? null) ? metricSpec.consecutiveErrorLimit : METRIC_CONSECUTIVE_ERROR_LIMIT_DEFAULT; + const failureLimit = isFiniteNumber(metricSpec.failureLimit ?? null) ? metricSpec.failureLimit : METRIC_FAILURE_LIMIT_DEFAULT; + const inconclusiveLimit = isFiniteNumber(metricSpec.inconclusiveLimit ?? null) ? metricSpec.inconclusiveLimit : METRIC_INCONCLUSIVE_LIMIT_DEFAULT; + + const canChartMetric = metricResults.chartable && metricResults.chartMax !== null; + + const [selectedView, setSelectedView] = React.useState(canChartMetric ? 'chart' : 'table'); + + const onChangeView = ({target: {value}}: RadioChangeEvent) => { + setSelectedView(value); + }; + + return ( +
    +
    +
    + {canChartMetric && ( + + + + + + + + + )} +
    + {status === AnalysisStatus.Pending && ( + + {metricName} analysis measurements have not yet begun. Measurement information will appear here when it becomes available. + + )} + {status !== AnalysisStatus.Pending && metricResults.transformedMeasurements.length === 0 && ( + Measurement results for {metricName} cannot be displayed. + )} + {status !== AnalysisStatus.Pending && metricResults.transformedMeasurements.length > 0 && ( + <> + + {selectedView === 'chart' && ( + + )} + {selectedView === 'table' && ( + + )} + + )} +
    + + Pass requirements + + 0} + /> +
    + {Array.isArray(metricSpec?.queries) && ( + <> +
    + + {metricSpec.queries.length > 1 ? 'Queries' : 'Query'} + +
    + {metricSpec.queries.map((query) => ( + + ))} + + )} +
    + ); +}; + +export default MetricPanel; diff --git a/ui/src/app/components/analysis-modal/panels/styles.scss b/ui/src/app/components/analysis-modal/panels/styles.scss new file mode 100644 index 0000000000..b0010c55dd --- /dev/null +++ b/ui/src/app/components/analysis-modal/panels/styles.scss @@ -0,0 +1,61 @@ +@import '../theme/theme.scss'; + +// Analysis Panel + +.analysis-header { + margin: $space-unit 0 (3 * $space-unit); +} + +.label { + display: block; +} + +// Metric Panel + +.metric-header { + display: flex; + align-items: center; + justify-content: space-between; + margin: $space-unit 0; +} + +.legend { + display: flex; + justify-content: flex-end; +} + +h5.section-title { + margin-bottom: 0; // antd override +} + +.query-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: $space-unit; +} + +.query-box { + :not(:last-child) { + margin-bottom: $space-small; + } + + :last-child { + margin-bottom: $space-large; + } +} + +// Common + +.summary-section, +.metric-section { + margin-bottom: 3 * $space-unit; + + &.medium-space { + margin-bottom: $space-medium; + } + + &.top-content { + margin-top: $space-unit; + } +} diff --git a/ui/src/app/components/analysis-modal/panels/summary-panel.tsx b/ui/src/app/components/analysis-modal/panels/summary-panel.tsx new file mode 100644 index 0000000000..4b2d7bb829 --- /dev/null +++ b/ui/src/app/components/analysis-modal/panels/summary-panel.tsx @@ -0,0 +1,73 @@ +import * as React from 'react'; +import * as moment from 'moment'; +import {Typography} from 'antd'; + +import {AnalysisStatus, FunctionalStatus} from '../types'; +import Header from '../header/header'; + +import classNames from 'classnames/bind'; +import './styles.scss'; + +const cx = classNames; + +const {Text} = Typography; + +const timeRangeFormatter = (start: number, end: number | null) => { + const startFormatted = moment(start).format('LLL'); + if (end === null) { + return `${startFormatted} - present`; + } + 
const isSameDate = moment(start).isSame(moment(end), 'day'); + const endFormatted = isSameDate ? moment(end).format('LT') : moment(end).format('LLL'); + return `${startFormatted} - ${endFormatted}`; +}; + +interface SummaryPanelProps { + className?: string[] | string; + endTime: number | null; + images: string[]; + message?: string; + revision: string; + startTime: number | null; + status: AnalysisStatus; + substatus?: FunctionalStatus.ERROR | FunctionalStatus.WARNING; + title: string; +} + +const SummaryPanel = ({className, endTime, images, message, revision, startTime, status, substatus, title}: SummaryPanelProps) => ( +
    +
    + {images.length > 0 && ( +
    + + {images.length > 1 ? `Versions` : `Version`} + + {images.join(', ')} +
    + )} +
    + + Revision + + {revision} +
    + {startTime !== null && ( +
    + + Run time + + {timeRangeFormatter(startTime, endTime)} +
    + )} + {message && ( +
    + + Summary + + {message} +
    + )} +
    +); + +export default SummaryPanel; diff --git a/ui/src/app/components/analysis-modal/query-box/query-box.scss b/ui/src/app/components/analysis-modal/query-box/query-box.scss new file mode 100644 index 0000000000..c12be636b2 --- /dev/null +++ b/ui/src/app/components/analysis-modal/query-box/query-box.scss @@ -0,0 +1,50 @@ +@import '../theme/theme.scss'; + +.query-box { + position: relative; + padding: $space-small 48px $space-small $space-small; + background-color: $gray-2; + border: 1px solid $gray-4; + max-height: 70px; + overflow: hidden; + transition: max-height 0.3s ease-in-out; + + &.can-expand { + cursor: pointer; + } + + .query { + display: -webkit-box; + -webkit-box-orient: vertical; + overflow: auto hidden; + white-space: pre-wrap; + word-wrap: break-word; + text-overflow: ellipsis; + line-clamp: 3; + -webkit-line-clamp: 3; + font-family: $font-family-mono; + font-size: 12px; + margin-bottom: 0; + } + + &.is-expanded { + max-height: 500px; + + .query { + line-clamp: initial; + -webkit-line-clamp: initial; + } + } +} + +.query-copy-button { + position: absolute; + font-size: 18px; + line-height: 1; + top: 8px; + right: 6px; + + svg { + color: $ant-primary; + } +} diff --git a/ui/src/app/components/analysis-modal/query-box/query-box.tsx b/ui/src/app/components/analysis-modal/query-box/query-box.tsx new file mode 100644 index 0000000000..188731c5a8 --- /dev/null +++ b/ui/src/app/components/analysis-modal/query-box/query-box.tsx @@ -0,0 +1,42 @@ +import * as React from 'react'; + +import {Typography} from 'antd'; + +import classNames from 'classnames'; +import './query-box.scss'; + +const {Paragraph} = Typography; + +interface QueryBoxProps { + className?: string[] | string; + query: string; +} + +const QueryBox = ({className, query}: QueryBoxProps) => { + const queryTextRef = React.useRef(null); + const [canExpand, setCanExpand] = React.useState(false); + const [expanded, toggleExpanded] = React.useState(false); + + React.useEffect(() => { + 
setCanExpand(queryTextRef.current?.offsetHeight !== queryTextRef.current?.scrollHeight); + }, [queryTextRef]); + + const expandQuery = () => { + toggleExpanded(true); + setCanExpand(false); + }; + + return ( +
    +
    +                {query}
    +            
    + +
    + ); +}; + +export default QueryBox; diff --git a/ui/src/app/components/analysis-modal/status-indicator/status-indicator.scss b/ui/src/app/components/analysis-modal/status-indicator/status-indicator.scss new file mode 100644 index 0000000000..35799cf1ff --- /dev/null +++ b/ui/src/app/components/analysis-modal/status-indicator/status-indicator.scss @@ -0,0 +1,84 @@ +@import '../theme/theme.scss'; + +@mixin indicator($background, $foreground) { + background: $background; + border-color: $foreground; + + &, + svg { + color: $foreground; + } +} + +.indicator-wrapper { + position: relative; + border-radius: 50%; +} + +.indicator { + display: flex; + align-items: center; + justify-content: center; + border-radius: 50%; + border-style: solid; + border-width: 1px; + + &.is-small { + width: 14px; + min-width: 14px; + height: 14px; + } + + &.is-large { + width: 28px; + min-width: 28px; + height: 28px; + } + + &, + &.is-INACTIVE { + @include indicator($inactive-background, $inactive-foreground); + } + + &.is-IN_PROGRESS { + @include indicator($in-progress-background, $in-progress-foreground); + } + + &.is-SUCCESS { + @include indicator($success-background, $success-foreground); + } + + &.is-WARNING { + @include indicator($warning-background, $warning-foreground); + } + + &.is-ERROR { + @include indicator($error-background, $error-foreground); + } +} + +.substatus { + position: absolute; + border: 1px solid white; + border-radius: 50%; + top: -3px; + left: -2px; + + &.is-small { + width: 8px; + height: 8px; + } + + &.is-large { + width: 12px; + height: 12px; + } + + &.is-WARNING { + background: $warning-foreground; + } + + &.is-ERROR { + background: $error-foreground; + } +} diff --git a/ui/src/app/components/analysis-modal/status-indicator/status-indicator.tsx b/ui/src/app/components/analysis-modal/status-indicator/status-indicator.tsx new file mode 100644 index 0000000000..5dd30f8e5d --- /dev/null +++ 
b/ui/src/app/components/analysis-modal/status-indicator/status-indicator.tsx @@ -0,0 +1,26 @@ +import * as React from 'react'; + +import {AnalysisStatus, FunctionalStatus} from '../types'; +import {ANALYSIS_STATUS_THEME_MAP} from '../constants'; + +import classNames from 'classnames'; +import './status-indicator.scss'; + +const cx = classNames; + +interface StatusIndicatorProps { + children?: React.ReactNode; + className?: string[] | string; + size?: 'small' | 'large'; + status: AnalysisStatus; + substatus?: FunctionalStatus.ERROR | FunctionalStatus.WARNING; +} + +const StatusIndicator = ({children, className, size = 'large', status, substatus}: StatusIndicatorProps) => ( +
    +
    {children}
    + {substatus !== undefined &&
    } +
    +); + +export default StatusIndicator; diff --git a/ui/src/app/components/analysis-modal/styles.scss b/ui/src/app/components/analysis-modal/styles.scss new file mode 100644 index 0000000000..89c15c36fe --- /dev/null +++ b/ui/src/app/components/analysis-modal/styles.scss @@ -0,0 +1,8 @@ +.tabs { + min-height: 550px; + margin-top: 16px !important; // antd override + + .ant-tabs-tab { + padding-left: 0 !important; // antd override + } +} diff --git a/ui/src/app/components/analysis-modal/theme/theme.scss b/ui/src/app/components/analysis-modal/theme/theme.scss new file mode 100644 index 0000000000..0d8f934264 --- /dev/null +++ b/ui/src/app/components/analysis-modal/theme/theme.scss @@ -0,0 +1,35 @@ +@import 'node_modules/argo-ui/v2/styles/colors'; + +// antd colors +$gray-2: #fafafa; +$gray-4: #f0f0f0; +$gray-5: #d9d9d9; +$gray-6: #bfbfbf; +$gray-7: #8c8c8c; +$gray-10: #262626; +$gray-11: #1f1f1f; +$gray-12: #141414; + +$ant-primary: #44505f; // from config/theme.ts + +$success-background: lighten($argo-success-color-dark, 35); +$success-foreground: $argo-success-color-dark; +$warning-background: lighten($argo-status-warning-color, 25); +$warning-foreground: $argo-status-warning-color; +$error-background: lighten($argo-failed-color, 20); +$error-foreground: $argo-failed-color-dark; +$in-progress-background: lighten($argo-running-color-dark, 40); +$in-progress-foreground: $argo-running-color-dark; +$inactive-background: lighten($argo-waiting-color, 20); +$inactive-foreground: $argo-waiting-color-dark; + +$space-unit: 4px; +$space-small: 8px; +$space-medium: 16px; +$space-large: 24px; + +$shadow-1: 0 6px 16px 0 rgba(0, 0, 0, 0.08), 0 3px 6px -4px rgba(0, 0, 0, 0.12), 0 9px 28px 8px rgba(0, 0, 0, 0.05); + +$font-family-primary: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, 'Noto Sans', sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', + 'Segoe UI Symbol', 'Noto Color Emoji'; +$font-family-mono: sfmono-regular, Consolas, liberation 
mono, Menlo, Courier, monospace; diff --git a/ui/src/app/components/analysis-modal/transforms.test.ts b/ui/src/app/components/analysis-modal/transforms.test.ts new file mode 100644 index 0000000000..5c2c2a667e --- /dev/null +++ b/ui/src/app/components/analysis-modal/transforms.test.ts @@ -0,0 +1,548 @@ +// eslint-disable-file @typescript-eslint/ban-ts-comment + +import { + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CloudWatchMetric, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult, +} from '../../../models/rollout/generated'; +import { + analysisEndTime, + analysisStartTime, + argValue, + chartMax, + conditionDetails, + formatKeyValueMeasurement, + formatMultiItemArrayMeasurement, + formatSingleItemArrayMeasurement, + formatThresholdsForChart, + formattedValue, + interpolateQuery, + isChartable, + isValidDate, + metricProvider, + metricStatusLabel, + metricSubstatus, + printableCloudWatchQuery, + printableDatadogQuery, +} from './transforms'; +import {AnalysisStatus, FunctionalStatus} from './types'; + +const MOCK_METRICS_WITHOUT_END_TIMES: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult[] = [ + { + measurements: [], + }, + { + measurements: [{}, {}], + }, +]; + +const MOCK_METRICS_WITH_END_TIMES: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult[] = [ + { + measurements: [ + { + // @ts-ignore + finishedAt: '2023-11-16T00:25:23Z', + }, + { + // @ts-ignore + finishedAt: '2023-11-16T00:26:23Z', + }, + { + // @ts-ignore + finishedAt: '2023-11-16T00:27:23Z', + }, + { + // @ts-ignore + finishedAt: '2023-11-16T00:28:23Z', + }, + ], + }, +]; + +const MOCK_ARGS: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument[] = [ + { + name: 'service-name', + value: 'istio-host-split-canary', + }, + { + name: 'application-name', + value: 'istio-host-split-canary', + }, + { + name: 
'cpu-usage-threshold', + }, + { + name: 'success-rate-threshold', + value: '0.95', + }, + { + name: 'latency-threshold', + value: '500', + }, +]; + +const MOCK_PROVIDER_PROMETHEUS: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider = { + prometheus: { + address: 'https://prometheus-k8s.monitoring:9090', + query: 'sum(irate(istio_requests_total{destination_service_name=~"{{args.service-name}}",response_code!~"5.*"}[1m])) \n/\nsum(irate(istio_requests_total{destination_service_name=~"{{args.service-name}}"}[1m]))', + }, +}; +const MOCK_PROVIDER_NEWRELIC: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider = { + newRelic: { + query: "FROM Transaction SELECT percentage(count(*), WHERE httpResponseCode != 500) as successRate where appName = '{{ args.application-name }}'", + }, +}; +const MOCK_PROVIDER_DATADOG_V2_1: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider = { + datadog: { + apiVersion: 'v2', + query: 'sum:requests.errors{service:{{args.service-name}}}.as_count()', + formula: "moving_rollup(a, 60, 'sum') / b", + }, +}; +const MOCK_PROVIDER_DATADOG_V2_2: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider = { + datadog: { + apiVersion: 'v2', + queries: { + a: 'sum:requests.errors{service:{{args.service-name}}}.as_count()', + b: 'sum:requests{service:{{args.service-name}}}.as_count()', + }, + formula: "moving_rollup(a, 60, 'sum') / b", + }, +}; + +const MOCK_PROVIDER_CLOUDWATCH: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CloudWatchMetric = { + metricDataQueries: [ + { + id: 'rate', + expression: 'errors / requests', + }, + { + id: 'errors', + metricStat: { + metric: { + namespace: 'app', + metricName: 'errors', + }, + stat: 'Sum', + unit: 'Count', + }, + returnData: false, + }, + { + id: 'requests', + metricStat: { + metric: { + namespace: 'app', + metricName: 'requests', + }, + stat: 'Sum', + unit: 'Count', + }, + returnData: false, + }, + ], +}; + +const MOCK_ARGS_PROMETHEUS = [ + { + name: 
'service-name', + value: 'istio-host-split-canary', + }, +]; +const MOCK_QUERY_PROMETHEUS = + 'sum(irate(istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m])) / sum(irate(istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m]))'; + +const MOCK_ARGS_NEWRELIC = [{name: 'application-name', value: 'myApp'}]; +const MOCK_QUERY_NEWRELIC = "FROM Transaction SELECT percentage(count(*), WHERE httpResponseCode != 500) as successRate where appName = '{{ args.application-name }}'"; + +const MOCK_ARGS_DATADOG = [ + { + name: 'service-name', + value: 'istio-host-split-canary', + }, +]; +const MOCK_QUERY_DATADOG = 'sum:requests.error.rate{service:{{args.service-name}}}'; + +const MOCK_ARGS_WAVEFRONT = [ + { + name: 'service-name', + value: 'istio-host-split-canary', + }, +]; +const MOCK_QUERY_WAVEFRONT = + 'sum(rate(5m, ts("istio.requestcount.count", response_code!=500 and destination_service="{{args.service-name}}"))) / sum(rate(5m, ts("istio.requestcount.count", reporter=client and destination_service="{{args.service-name}}")))'; + +const MOCK_QUERY_GRAPHITE = + "target=summarize(asPercent(sumSeries(stats.timers.httpServerRequests.app.{{args.service-name}}.exception.*.method.*.outcome.{CLIENT_ERROR,INFORMATIONAL,REDIRECTION,SUCCESS}.status.*.uri.*.count), sumSeries(stats.timers.httpServerRequests.app.{{args.service-name}}.exception.*.method.*.outcome.*.status.*.uri.*.count)),'5min','avg')"; +const MOCK_ARGS_GRAPHITE = [ + { + name: 'service-name', + value: 'istio-host-split-canary', + }, +]; + +const MOCK_QUERY_INFLUXDB = + 'from(bucket: "app_istio") range(start: -15m) filter(fn: (r) => r["destination_workload"] == "{{ args.application-name }}")|> filter(fn: (r) => r["_measurement"] == "istio:istio_requests_errors_percentage:rate1m:5xx")'; +const MOCK_ARGS_INFLUXDB = [{name: 'application-name', value: 'myApp'}]; + +const MOCK_QUERY_SKYWALKING = + 'query queryData($duration: Duration!) 
{ service_apdex: readMetricsValues(condition: { name: "service_apdex", entity: { scope: Service, serviceName: "{{ args.service-name }}", normal: true } }, duration: $duration) { label values { values { value } } } }'; +const MOCK_ARGS_SKYWALKING = [ + { + name: 'service-name', + value: 'istio-host-split-canary', + }, +]; + +const MOCK_CONDITION_1 = 'result[0] < .95'; +const MOCK_CONDITION_2 = 'result[0] > .5 && result[0] < .95'; +const MOCK_CONDITION_3 = 'result.successRate >= 0.95'; +const MOCK_CONDITION_4 = 'result.successRate >= {{ args.success-rate-threshold }}'; +const MOCK_CONDITION_5 = 'result.successRate >= {{ args.success-rate-threshold }} && result.errorRate <= 0.1'; + +describe('analysis modal transforms', () => { + beforeAll(() => {}); + afterAll(() => {}); + + test('isValidDate() for undefined', () => { + expect(isValidDate()).toBe(false); + }); + test('isValidDate() for a non-date recognized string', () => { + expect(isValidDate('abcd')).toBe(false); + }); + test('isValidDate() for a date recognized string', () => { + expect(isValidDate('2023-11-16T00:25:23Z')).toBe(true); + }); + + test('analysisStartTime() for undefined', () => { + expect(analysisStartTime()).toBeNull(); + }); + test('analysisStartTime() for a non-date recognized string', () => { + expect(analysisStartTime('abcd')).toBeNull(); + }); + test('analysisStartTime() for a date recognized string', () => { + expect(analysisStartTime('2023-11-16T00:25:23Z')).toBe(1700094323000); + }); + + test('analysisEndTime() for no metric results', () => { + expect(analysisEndTime([])).toBe(null); + }); + test('analysisEndTime() for analysis with metrics but no measurements', () => { + expect(analysisEndTime(MOCK_METRICS_WITHOUT_END_TIMES)).toBe(null); + }); + test('analysisEndTime() for measurements with finishedAt times', () => { + expect(analysisEndTime(MOCK_METRICS_WITH_END_TIMES)).toBe(1700094503000); + }); + + test('argValue() for empty args', () => { + expect(argValue([], 
'cpu-threhold')).toBeNull(); + }); + test('argValue() for missing arg name / value', () => { + expect(argValue(MOCK_ARGS, 'memory-threshold')).toBeNull(); + }); + test('argValue() for missing arg value', () => { + expect(argValue(MOCK_ARGS, 'cpu-usage-treshold')).toBeNull(); + }); + test('argValue() for present arg name / value', () => { + expect(argValue(MOCK_ARGS, 'latency-threshold')).toBe('500'); + }); + + test('metricProvider() for known provider', () => { + expect(metricProvider(MOCK_PROVIDER_PROMETHEUS)).toBe('prometheus'); + }); + + test('conditionDetails with missing condition', () => { + expect(conditionDetails(undefined, MOCK_ARGS, MOCK_PROVIDER_PROMETHEUS)).toEqual({ + label: null, + thresholds: [], + conditionKeys: [], + }); + }); + test('conditionDetails() with missing provider', () => { + expect(conditionDetails(MOCK_CONDITION_1, MOCK_ARGS)).toEqual({ + label: null, + thresholds: [], + conditionKeys: [], + }); + }); + test('conditionDetails() for unsupported format', () => { + expect(conditionDetails('result in resultsArray', MOCK_ARGS, MOCK_PROVIDER_PROMETHEUS)).toEqual({ + label: 'result in resultsArray', + thresholds: [], + conditionKeys: [], + }); + }); + test('conditionDetails() with missing args', () => { + expect(conditionDetails(MOCK_CONDITION_1, undefined, MOCK_PROVIDER_PROMETHEUS)).toEqual({ + conditionKeys: ['0'], + label: 'result[0] < .95', + thresholds: [0.95], + }); + }); + test('conditionDetails() for condition like result[0] [>, <] [number]', () => { + expect(conditionDetails(MOCK_CONDITION_1, MOCK_ARGS, MOCK_PROVIDER_PROMETHEUS)).toEqual({ + conditionKeys: ['0'], + label: 'result[0] < .95', + thresholds: [0.95], + }); + }); + test('conditionDetails() for multiple conditions like result[0] [>, <] [number] && result[0] [>, <] [number]', () => { + expect(conditionDetails(MOCK_CONDITION_2, MOCK_ARGS, MOCK_PROVIDER_PROMETHEUS)).toEqual({ + conditionKeys: ['0'], + label: 'result[0] > .5 && result[0] < .95', + thresholds: [0.5, 0.95], + }); 
+ }); + test('conditionDetails() for condition like result.[key] [>, <] [number]', () => { + expect(conditionDetails(MOCK_CONDITION_3, MOCK_ARGS, MOCK_PROVIDER_NEWRELIC)).toEqual({ + conditionKeys: ['successRate'], + label: 'result.successRate >= 0.95', + thresholds: [0.95], + }); + }); + test('conditionDetails() for condition like result.[key] [>, <] [arg value]', () => { + expect(conditionDetails(MOCK_CONDITION_4, MOCK_ARGS, MOCK_PROVIDER_NEWRELIC)).toEqual({ + conditionKeys: ['successRate'], + label: 'result.successRate >= 0.95', + thresholds: [0.95], + }); + }); + test('conditionDetails() for multiple condition like result.[key1] [>, <] [arg value] && result.[key2] [>, <] [number]', () => { + expect(conditionDetails(MOCK_CONDITION_5, MOCK_ARGS, MOCK_PROVIDER_NEWRELIC)).toEqual({ + conditionKeys: ['successRate', 'errorRate'], + label: 'result.successRate >= 0.95 && result.errorRate <= 0.1', + thresholds: [0.95, 0.1], + }); + }); + + test('formatThresholdsForChart() with number values', () => { + expect(formatThresholdsForChart([0, 1.1, 2.22, 3.333, 4.4444, 5.55555])).toEqual([0, 1.1, 2.22, 3.33, 4.44, 5.56]); + }); + + test('chartMax() for 0 max value and null thresholds', () => { + expect(chartMax(0, null, null)).toBe(1); + }); + test('chartMax() for 1 max value and null thresholds', () => { + expect(chartMax(1, null, null)).toBe(1.2); + }); + test('chartMax() for max value and thresholds that are the same', () => { + expect(chartMax(2, [2], [2])).toBe(2.4); + }); + test('chartMax() for max value that is above thresholds', () => { + expect(chartMax(4, [2, 3], [1, 2])).toBe(4.8); + }); + test('chartMax() for fail threshold that is above value and success threshold', () => { + expect(chartMax(2, [2, 3, 4], [1, 2])).toBe(4.8); + }); + test('chartMax() for success threshold that is above value and fail threshold', () => { + expect(chartMax(2, [2, 3, 4], [1, 2, 6])).toBe(7.2); + }); + + test('metricSubstatus() for metric with pending status', () => { + 
expect(metricSubstatus(AnalysisStatus.Pending, 0, 0, 0)).toBe(undefined); + }); + test('metricSubstatus() for successful metric with no issues', () => { + expect(metricSubstatus(AnalysisStatus.Successful, 0, 0, 0)).toBe(undefined); + }); + test('metricSubstatus() for successful metric with failures', () => { + expect(metricSubstatus(AnalysisStatus.Successful, 2, 0, 0)).toBe(FunctionalStatus.ERROR); + }); + test('metricSubstatus() for successful metric with errors', () => { + expect(metricSubstatus(AnalysisStatus.Successful, 0, 2, 0)).toBe(FunctionalStatus.WARNING); + }); + + test('metricStatusLabel() for metric with unknown status', () => { + expect(metricStatusLabel(AnalysisStatus.Unknown, 0, 0, 0)).toBe('Analysis status unknown'); + }); + test('metricStatusLabel() for metric with successful status with failures', () => { + expect(metricStatusLabel(AnalysisStatus.Successful, 1, 0, 0)).toBe('Analysis passed with measurement failures'); + }); + test('metricStatusLabel() for metric with successful status with errors', () => { + expect(metricStatusLabel(AnalysisStatus.Successful, 0, 1, 0)).toBe('Analysis passed with measurement errors'); + }); + test('metricStatusLabel() for metric with successful status with inconclusives', () => { + expect(metricStatusLabel(AnalysisStatus.Successful, 0, 0, 1)).toBe('Analysis passed with inconclusive measurements'); + }); + test('metricStatusLabel() for metric with successful status with multiple issues', () => { + expect(metricStatusLabel(AnalysisStatus.Successful, 1, 2, 3)).toBe('Analysis passed with multiple issues'); + }); + + test('interpolateQuery() for no query', () => { + expect(interpolateQuery(undefined, MOCK_ARGS)).toBe(undefined); + }); + test('interpolateQuery() for prometheus query with no args', () => { + expect(interpolateQuery(MOCK_QUERY_PROMETHEUS, [])).toBe(MOCK_QUERY_PROMETHEUS); + }); + test('interpolateQuery() for prometheus query and args', () => { + expect(interpolateQuery(MOCK_QUERY_PROMETHEUS, 
MOCK_ARGS_PROMETHEUS)).toBe( + 'sum(irate(istio_requests_total{reporter="source",destination_service=~"istio-host-split-canary",response_code!~"5.*"}[5m])) / sum(irate(istio_requests_total{reporter="source",destination_service=~"istio-host-split-canary"}[5m]))' + ); + }); + test('interpolateQuery() for newrelic query and args', () => { + expect(interpolateQuery(MOCK_QUERY_NEWRELIC, MOCK_ARGS_NEWRELIC)).toBe( + "FROM Transaction SELECT percentage(count(*), WHERE httpResponseCode != 500) as successRate where appName = 'myApp'" + ); + }); + test('interpolateQuery() for simple datadog query and args', () => { + expect(interpolateQuery(MOCK_QUERY_DATADOG, MOCK_ARGS_DATADOG)).toBe('sum:requests.error.rate{service:istio-host-split-canary}'); + }); + test('interpolateQuery() for wavefront query and args', () => { + expect(interpolateQuery(MOCK_QUERY_WAVEFRONT, MOCK_ARGS_WAVEFRONT)).toBe( + 'sum(rate(5m, ts("istio.requestcount.count", response_code!=500 and destination_service="istio-host-split-canary"))) / sum(rate(5m, ts("istio.requestcount.count", reporter=client and destination_service="istio-host-split-canary")))' + ); + }); + test('interpolateQuery() for graphite query and args', () => { + expect(interpolateQuery(MOCK_QUERY_GRAPHITE, MOCK_ARGS_GRAPHITE)).toBe( + "target=summarize(asPercent(sumSeries(stats.timers.httpServerRequests.app.istio-host-split-canary.exception.*.method.*.outcome.{CLIENT_ERROR,INFORMATIONAL,REDIRECTION,SUCCESS}.status.*.uri.*.count), sumSeries(stats.timers.httpServerRequests.app.istio-host-split-canary.exception.*.method.*.outcome.*.status.*.uri.*.count)),'5min','avg')" + ); + }); + test('interpolateQuery() for influxdb query and args', () => { + expect(interpolateQuery(MOCK_QUERY_INFLUXDB, MOCK_ARGS_INFLUXDB)).toBe( + 'from(bucket: "app_istio") range(start: -15m) filter(fn: (r) => r["destination_workload"] == "myApp")|> filter(fn: (r) => r["_measurement"] == "istio:istio_requests_errors_percentage:rate1m:5xx")' + ); + }); + 
test('interpolateQuery() for skywalking query and args', () => { + expect(interpolateQuery(MOCK_QUERY_SKYWALKING, MOCK_ARGS_SKYWALKING)).toBe( + 'query queryData($duration: Duration!) { service_apdex: readMetricsValues(condition: { name: "service_apdex", entity: { scope: Service, serviceName: "istio-host-split-canary", normal: true } }, duration: $duration) { label values { values { value } } } }' + ); + }); + + test('printableDataDogQuery() with v2 query and formula', () => { + expect(printableDatadogQuery(MOCK_PROVIDER_DATADOG_V2_1.datadog, MOCK_ARGS_DATADOG)).toStrictEqual([ + `query: sum:requests.errors{service:istio-host-split-canary}.as_count(), formula: moving_rollup(a, 60, 'sum') / b`, + ]); + }); + test('printableDataDogQuery() with v2 queries and formula', () => { + expect(printableDatadogQuery(MOCK_PROVIDER_DATADOG_V2_2.datadog, MOCK_ARGS_DATADOG)).toStrictEqual([ + `queries: {"a":"sum:requests.errors{service:istio-host-split-canary}.as_count()","b":"sum:requests{service:istio-host-split-canary}.as_count()"}, formula: moving_rollup(a, 60, 'sum') / b`, + ]); + }); + + test('printableCloudWatchQuery() with metricDataQueries', () => { + expect(printableCloudWatchQuery(MOCK_PROVIDER_CLOUDWATCH)).toStrictEqual([ + '{"id":"rate","expression":"errors / requests"}', + '{"id":"errors","metricStat":{"metric":{"namespace":"app","metricName":"errors"},"stat":"Sum","unit":"Count"},"returnData":false}', + '{"id":"requests","metricStat":{"metric":{"namespace":"app","metricName":"requests"},"stat":"Sum","unit":"Count"},"returnData":false}', + ]); + }); + + test('isChartable() for undefined', () => { + expect(isChartable(undefined)).toBe(false); + }); + test('isChartable() for null', () => { + expect(isChartable(null)).toBe(true); + }); + test('isChartable() for a string', () => { + expect(isChartable('abc')).toBe(false); + }); + test('isChartable() for an array', () => { + expect(isChartable([1, 2, 5, 3])).toBe(false); + }); + test('isChartable() for a positive number', 
() => { + expect(isChartable(5)).toBe(true); + }); + test('isChartable() for a negative number', () => { + expect(isChartable(-5)).toBe(true); + }); + + test('formattedValue() for null', () => { + expect(formattedValue(null)).toBe(null); + }); + test('formattedValue() for an int', () => { + expect(formattedValue(1)).toBe(1); + }); + test('formattedValue() for a float', () => { + expect(formattedValue(1.2653)).toBe(1.27); + }); + test('formattedValue() for a string', () => { + expect(formattedValue('abc')).toBe('abc'); + }); + test('formattedValue() for an array of numbers', () => { + expect(formattedValue([1, 4, 3, 7])).toBe('1,4,3,7'); + }); + + test('formatSingleItemArrayMeasurement() with out of bounds accessor', () => { + expect(formatSingleItemArrayMeasurement([4], 1)).toEqual({ + canChart: true, + chartValue: {1: null}, + tableValue: {1: null}, + }); + }); + test('formatSingleItemArrayMeasurement() for a value like [`abc`] with accessor 0', () => { + expect(formatSingleItemArrayMeasurement(['abc'], 0)).toEqual({ + canChart: false, + tableValue: {0: 'abc'}, + }); + }); + test('formatSingleItemArrayMeasurement() for a value like [4] with accessor 0', () => { + expect(formatSingleItemArrayMeasurement([4], 0)).toEqual({ + canChart: true, + chartValue: {0: 4}, + tableValue: {0: 4}, + }); + }); + test('formatSingleItemArrayMeasurement() for a value like [null] with accessor 0', () => { + expect(formatSingleItemArrayMeasurement([null], 0)).toEqual({ + canChart: true, + chartValue: {0: null}, + tableValue: {0: null}, + }); + }); + + test('formatMultiItemArrayMeasurement() with an empty array', () => { + expect(formatMultiItemArrayMeasurement([])).toEqual({ + canChart: false, + tableValue: '', + }); + }); + test('formatMultiItemArrayMeasurement() with all numbers', () => { + expect(formatMultiItemArrayMeasurement([4, 6, 3, 5])).toEqual({ + canChart: true, + chartValue: 4, + tableValue: '4,6,3,5', + }); + }); + test('formatMultiItemArrayMeasurement() with null as the 
first item', () => { + expect(formatMultiItemArrayMeasurement([null, 6, 3, 5])).toEqual({ + canChart: true, + chartValue: null, + tableValue: 'null,6,3,5', + }); + }); + test('formatMultiItemArrayMeasurement() with a string as the first item', () => { + expect(formatMultiItemArrayMeasurement(['abc', 6, 3, 5])).toEqual({ + canChart: false, + tableValue: 'abc,6,3,5', + }); + }); + + test('formatKeyValueMeasurement() with key value pairs and no matching accessors', () => { + expect(formatKeyValueMeasurement({cpuUsage: 50, latency: 500}, ['errorRate'])).toEqual({ + canChart: false, + chartValue: {errorRate: null}, + tableValue: {errorRate: null}, + }); + }); + test('formatKeyValueMeasurement() with key value pairs and a single matching accessor', () => { + expect(formatKeyValueMeasurement({cpuUsage: 50, latency: 500}, ['latency'])).toEqual({ + canChart: true, + chartValue: {latency: 500}, + tableValue: {latency: 500}, + }); + }); + test('formatKeyValueMeasurement() with key value pairs and multiple matching accessors', () => { + expect(formatKeyValueMeasurement({cpuUsage: 50, latency: 500}, ['latency', 'cpuUsage'])).toEqual({ + canChart: true, + chartValue: {latency: 500, cpuUsage: 50}, + tableValue: {latency: 500, cpuUsage: 50}, + }); + }); + test('formatKeyValueMeasurement() with key value pairs all null and matching accessors', () => { + expect(formatKeyValueMeasurement({cpuUsage: null, latency: null}, ['latency', 'cpuUsage'])).toEqual({ + canChart: false, + chartValue: {latency: null, cpuUsage: null}, + tableValue: {latency: null, cpuUsage: null}, + }); + }); +}); diff --git a/ui/src/app/components/analysis-modal/transforms.ts b/ui/src/app/components/analysis-modal/transforms.ts new file mode 100644 index 0000000000..a655dda1c4 --- /dev/null +++ b/ui/src/app/components/analysis-modal/transforms.ts @@ -0,0 +1,648 @@ +// eslint-disable-file @typescript-eslint/ban-ts-comment +import * as moment from 'moment'; + +import { + 
GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CloudWatchMetric, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Measurement, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult, + RolloutAnalysisRunSpecAndStatus, +} from '../../../models/rollout/generated'; +import {AnalysisStatus, FunctionalStatus, MeasurementSetInfo, MeasurementValueInfo, TransformedMeasurement, TransformedMetric, TransformedValueObject} from './types'; + +export const isFiniteNumber = (value: any) => Number.isFinite(value); + +export const roundNumber = (value: number): number => Math.round(value * 100) / 100; + +export const isValidDate = (value?: string): boolean => value !== undefined && moment(value).isValid(); + +// Overall Analysis Utils + +/** + * + * @param startTime start time of the analysis run + * @returns timestamp in ms or null + */ +export const analysisStartTime = (startTime?: string): number | null => (isValidDate(startTime) ? new Date(startTime).getTime() : null); + +/** + * + * @param metricResults array of metric results + * @returns timestamp in ms or null + */ +export const analysisEndTime = (metricResults: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult[]): number | null => { + if (metricResults.length === 0) { + return null; + } + + const measurementEndTimes: number[] = []; + metricResults.forEach((metricResult) => { + (metricResult.measurements ?? []).forEach((measurement) => { + // @ts-ignore + if (isValidDate(measurement.finishedAt)) { + // @ts-ignore + measurementEndTimes.push(new Date(measurement.finishedAt).getTime()); + } + }); + }); + + const latestTime = Math.max(...measurementEndTimes); + return isFiniteNumber(latestTime) ? 
latestTime : null; +}; + +// Arg Utils + +/** + * + * @param args arguments name/value pairs associated with the analysis run + * @param argName name of arg for which to find the value + * @returns + * value associated with the arg + * or null if args is empty + * or null if argName is not present in args + * or null if arg value is undefined or null + */ +export const argValue = (args: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument[], argName: string): string | null => + args.find((arg) => arg.name === argName)?.value ?? null; + +// Metric Utils + +/** + * + * @param providerInfo metric provider object + * @returns first key in the provider object + */ +export const metricProvider = (providerInfo: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider): string => + Object.keys(providerInfo)?.[0] ?? 'unsupported provider'; + +const PROVIDER_CONDITION_SUPPORT: { + [key: string]: (resultAccessor: string) => { + isFormatSupported: boolean; + conditionKey: string | null; + }; +} = { + prometheus: (resultAccessor: string) => ({ + isFormatSupported: resultAccessor === 'result[0]', + conditionKey: '0', + }), + datadog: (resultAccessor: string) => ({ + isFormatSupported: ['result', 'default(result, 0)'].includes(resultAccessor), + conditionKey: resultAccessor.includes('0') ? 
'0' : null, + }), + wavefront: (resultAccessor: string) => ({ + isFormatSupported: resultAccessor === 'result', + conditionKey: null, + }), + newRelic: (resultAccessor: string) => ({ + isFormatSupported: resultAccessor.startsWith('result.'), + conditionKey: resultAccessor.substring(7), + }), + cloudWatch: (resultAccessor: string) => ({ + isFormatSupported: false, + conditionKey: null, + }), + graphite: (resultAccessor: string) => ({ + isFormatSupported: resultAccessor === 'result[0]', + conditionKey: '0', + }), + influxdb: (resultAccessor: string) => ({ + isFormatSupported: resultAccessor === 'result[0]', + conditionKey: '0', + }), + skywalking: (resultAccessor: string) => ({ + isFormatSupported: false, + conditionKey: null, + }), +}; + +/** + * + * @param condition failure_condition or success_condition with the format + * [result accessor] [operator] {{ args.[argname] }} + * or [result accessor] [operator] [value] + * @param args arguments name/value pairs associated with the analysis run + * @returns + * label - a friendly fail/success condition label and + * thresholds - threshold values that can be converted into numbers + * conditionKeys - string keys for the values being compared in the condition + */ +export const conditionDetails = ( + condition?: string, + args: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument[] = [], + provider?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider +): { + label: string | null; + thresholds: number[]; + conditionKeys: string[]; +} => { + if (condition === undefined || condition === '' || provider === undefined || metricProvider(provider) === 'unsupported provider') { + return { + label: null, + thresholds: [], + conditionKeys: [], + }; + } + + const interpolatedCondition = interpolateQuery(condition, args); + const subconditions = interpolatedCondition.split(/ && | \|\| /); + + const providerType = metricProvider(provider); + const thresholds: number[] = []; + const conditionKeys: string[] = []; 
+ + // for each subcondition, if it deemed to be a supported subcondition, add keys and numeric thresholds + subconditions.forEach((subcondition) => { + const subconditionParts = subcondition.split(' '); + if (subconditionParts.length === 3) { + const providerInfo = PROVIDER_CONDITION_SUPPORT[providerType]?.(subconditionParts[0].trim()); + const isFormatSupported = providerInfo?.isFormatSupported ?? false; + const conditionKey = providerInfo?.conditionKey ?? null; + + const isUnderOverThreshold = subconditionParts[1].includes('<') || subconditionParts[1].includes('>'); + const isChartableThreshold = isFiniteNumber(parseFloat(subconditionParts[2])); + + if (isFormatSupported && isUnderOverThreshold && isChartableThreshold) { + if (conditionKey !== null) { + conditionKeys.push(conditionKey); + } + thresholds.push(Number(subconditionParts[2])); + } + } + }); + + return { + label: interpolatedCondition, + thresholds, + conditionKeys: [...new Set(conditionKeys)], + }; +}; + +/** + * + * @param thresholds threshold values + * @returns number formatted to two decimal points + */ +export const formatThresholdsForChart = (thresholds: number[]): (number | null)[] => thresholds.map((t) => roundNumber(t)); + +/** + * + * @param valueMax max value for a measurement + * @param failThresholds fail thresholds for the metric + * @param successThresholds success thresholds for the metric + * @returns 120% of the max content value which could either be a data point or one of the thresholds + * or 1 if the max value is less than 1 and there are no thresholds + */ +export const chartMax = (valueMax: number, failThresholds: number[] | null, successThresholds: number[] | null) => { + if (valueMax < 1 && failThresholds === null && successThresholds === null) { + return 1; + } + const failThresholdMax = failThresholds !== null && failThresholds.length > 0 ? 
Math.max(...failThresholds) : Number.NEGATIVE_INFINITY; + const successThresholdMax = successThresholds !== null && successThresholds.length > 0 ? Math.max(...successThresholds) : Number.NEGATIVE_INFINITY; + return roundNumber(Math.max(valueMax, failThresholdMax, successThresholdMax) * 1.2); +}; + +/** + * + * @param phase analysis phase + * @returns analysis phase adjusted to render the UI status with a more accurate functional status + */ +export const getAdjustedMetricPhase = (phase?: AnalysisStatus): AnalysisStatus => (phase === AnalysisStatus.Error ? AnalysisStatus.Failed : phase ?? AnalysisStatus.Unknown); + +/** + * + * @param specAndStatus analysis spec and status information + * @returns analysis metrics with additional information to render to the UI + */ +export const transformMetrics = (specAndStatus?: RolloutAnalysisRunSpecAndStatus): {[key: string]: TransformedMetric} => { + if (specAndStatus?.spec === undefined || specAndStatus?.status === undefined) { + return {}; + } + + const {spec, status} = specAndStatus; + + const transformedMetrics: {[key: string]: TransformedMetric} = {}; + status.metricResults?.forEach((metricResults, idx) => { + const metricName = metricResults?.name ?? `Unknown metric ${idx}`; + const metricSpec = spec?.metrics?.find((m) => m.name === metricName); + + if (metricSpec !== undefined) { + // spec values + const failConditionInfo = conditionDetails(metricSpec.failureCondition, spec.args, metricSpec.provider); + const failThresholds = failConditionInfo.thresholds.length > 0 ? formatThresholdsForChart(failConditionInfo.thresholds) : null; + const successConditionInfo = conditionDetails(metricSpec.successCondition, spec.args, metricSpec.provider); + const successThresholds = successConditionInfo.thresholds.length > 0 ? 
formatThresholdsForChart(successConditionInfo.thresholds) : null; + + // value keys are needed for measurement values formatted as {key1: value1, key2: value2} + const conditionKeys = [...new Set([...failConditionInfo.conditionKeys, ...successConditionInfo.conditionKeys])]; + + // results values + const transformedMeasurementInfo = transformMeasurements(conditionKeys, metricResults?.measurements); + const {measurements, chartable, min, max} = transformedMeasurementInfo; + + const metricStatus = (metricResults?.phase ?? AnalysisStatus.Unknown) as AnalysisStatus; + const measurementFailures = metricResults?.failed ?? 0; + const measurementErrors = metricResults?.error ?? 0; + const measurementInconclusives = metricResults?.inconclusive ?? 0; + + transformedMetrics[metricName] = { + name: metricName, + spec: { + ...metricSpec, + queries: metricQueries(metricSpec.provider, spec.args), + failConditionLabel: failConditionInfo.label, + failThresholds, + successConditionLabel: successConditionInfo.label, + successThresholds, + conditionKeys, + }, + status: { + ...metricResults, + adjustedPhase: getAdjustedMetricPhase(metricStatus), + statusLabel: metricStatusLabel(metricStatus, measurementFailures, measurementErrors, measurementInconclusives), + substatus: metricSubstatus(metricStatus, measurementFailures, measurementErrors, measurementInconclusives), + transformedMeasurements: measurements, + chartable, + chartMin: min, + chartMax: chartMax(max, failThresholds, successThresholds), + }, + }; + } + }); + + return transformedMetrics; +}; + +/** + * + * @param status analysis metric status + * @param failures number of measurement failures + * @param errors number of measurement errors + * @param inconclusives number of inconclusive measurements + * @returns ui state substatus to indicate that there were errors/failures/ + * inconclusives + */ +export const metricSubstatus = (status: AnalysisStatus, failures: number, errors: number, inconclusives: number): 
FunctionalStatus.ERROR | FunctionalStatus.WARNING | undefined => { + switch (status) { + case AnalysisStatus.Pending: + case AnalysisStatus.Failed: + case AnalysisStatus.Inconclusive: + case AnalysisStatus.Error: + return undefined; + case AnalysisStatus.Running: + case AnalysisStatus.Successful: + if (failures > 0) { + return FunctionalStatus.ERROR; + } + if (errors > 0 || inconclusives > 0) { + return FunctionalStatus.WARNING; + } + return undefined; + default: + return undefined; + } +}; + +/** + * + * @param status analysis metric status + * @param failures number of measurement failures + * @param errors number of measurement errors + * @param inconclusives number of inconclusive measurements + * @returns descriptive label to include more details beyond the overall + * analysis status + */ +export const metricStatusLabel = (status: AnalysisStatus, failures: number, errors: number, inconclusives: number) => { + let extraDetails = ''; + const hasFailures = failures > 0; + const hasErrors = errors > 0; + const hasInconclusives = inconclusives > 0; + switch (status) { + case AnalysisStatus.Unknown: + return 'Analysis status unknown'; + case AnalysisStatus.Pending: + return 'Analysis pending'; + case AnalysisStatus.Running: + return 'Analysis in progress'; + case AnalysisStatus.Failed: + return `Analysis failed`; + case AnalysisStatus.Inconclusive: + return `Analysis inconclusive`; + case AnalysisStatus.Error: + return 'Analysis errored'; + case AnalysisStatus.Successful: + if (hasFailures && !hasErrors && !hasInconclusives) { + extraDetails = 'with measurement failures'; + } else if (!hasFailures && hasErrors && !hasInconclusives) { + extraDetails = 'with measurement errors'; + } else if (!hasFailures && !hasErrors && hasInconclusives) { + extraDetails = 'with inconclusive measurements'; + } else if (hasFailures || hasErrors || hasInconclusives) { + extraDetails = 'with multiple issues'; + } + return `Analysis passed ${extraDetails}`.trim(); + default: + return 
''; + } +}; + +/** + * + * @param query query for an analysis run metric + * @param args arguments name/value pairs associated with the analysis run + * @returns the query with all {{ args.[argName] }} replaced with + * the value of the arg + */ +export const interpolateQuery = (query?: string, args?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument[]) => { + if (query === undefined) { + return undefined; + } + if (args === undefined || args.length === 0) { + return query; + } + + const regex = /\{{.*?\}}/g; + return query.replace(regex, (match) => { + const argPieces = match.replace(/[{ }]/g, '').split('.'); + const replacementValue = argValue(args, argPieces?.[1] ?? ''); + return replacementValue ?? match; + }); +}; + +/** + * + * @param datadog datadog metric object + * @param args arguments name/value pairs associated with the analysis run + * @returns query formatted for display or undefined + */ +export const printableDatadogQuery = ( + datadog: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric, + args: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument[] +): string[] | undefined => { + if ((datadog.apiVersion ?? '').toLowerCase() === 'v1' && 'query' in datadog) { + return [interpolateQuery(datadog.query, args)]; + } + if ((datadog.apiVersion ?? '').toLowerCase() === 'v2') { + if ('query' in datadog) { + return 'formula' in datadog ? [`query: ${interpolateQuery(datadog.query, args)}, formula: ${datadog.formula}`] : [interpolateQuery(datadog.query, args)]; + } + if ('queries' in datadog) { + let interpolatedQueries: {[key: string]: string} = {}; + Object.keys(datadog.queries).forEach((queryKey) => { + interpolatedQueries[queryKey] = interpolateQuery(datadog.queries[queryKey], args); + }); + return 'formula' in datadog + ? 
[`queries: ${JSON.stringify(interpolatedQueries)}, formula: ${datadog.formula}`] + : Object.values(datadog.queries).map((query) => interpolateQuery(query, args)); + } + } + return undefined; +}; + +/** + * + * @param cloudWatch cloudwatch metric object + * @returns query formatted for display or undefined + */ +export const printableCloudWatchQuery = (cloudWatch: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CloudWatchMetric): string[] | undefined => { + return Array.isArray(cloudWatch.metricDataQueries) ? cloudWatch.metricDataQueries.map((query) => JSON.stringify(query)) : undefined; +}; + +/** + * + * @param provider metric provider object + * @param args arguments name/value pairs associated with the analysis run + * @returns query formatted for display or undefined + */ +export const metricQueries = ( + provider?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricProvider | null, + args: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Argument[] = [] +): string[] | undefined => { + if (provider === undefined || provider === null) { + return undefined; + } + const providerType = metricProvider(provider); + switch (providerType) { + case 'prometheus': + return [interpolateQuery(provider.prometheus.query, args)]; + case 'datadog': + return printableDatadogQuery(provider.datadog, args); + case 'wavefront': + return [interpolateQuery(provider.wavefront.query, args)]; + case 'newRelic': + return [interpolateQuery(provider.newRelic.query, args)]; + case 'cloudWatch': + return printableCloudWatchQuery(provider.cloudWatch); + case 'graphite': + return [interpolateQuery(provider.graphite.query, args)]; + case 'influxdb': + return [interpolateQuery(provider.influxdb.query, args)]; + case 'skywalking': + return [interpolateQuery(provider.skywalking.query, args)]; + // not currently supported: kayenta, web, job, plugin + default: + return undefined; + } +}; + +// Measurement Utils + +/** + * + * @param conditionKeys keys from success/fail conditions used 
in some cases to pull values from the measurement result + * @param measurements array of metric measurements + * @returns formatted measurement values and chart information if the metric can be charted + */ +export const transformMeasurements = (conditionKeys: string[], measurements?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Measurement[]): MeasurementSetInfo => { + if (measurements === undefined || measurements.length === 0) { + return { + chartable: false, + min: 0, + max: null, + measurements: [], + }; + } + + return measurements.reduce( + ( + acc: {chartable: boolean; min: number; max: number | null; measurements: TransformedMeasurement[]}, + currMeasurement: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Measurement + ) => { + const transformedValue = transformMeasurementValue(conditionKeys, currMeasurement.value); + const {canChart, tableValue} = transformedValue; + const canCompareToBoundaries = canChart && transformedValue.chartValue !== null && isFiniteNumber(transformedValue.chartValue ?? null); + + return { + chartable: acc.chartable && canChart, + min: canCompareToBoundaries ? Math.min(Number(transformedValue.chartValue), acc.min) : acc.min, + max: canCompareToBoundaries ? Math.max(Number(transformedValue.chartValue), acc.max ?? 
0) : acc.max, + measurements: [ + ...acc.measurements, + { + ...currMeasurement, + chartValue: transformedValue.chartValue, + tableValue, + }, + ], + }; + }, + {chartable: true, min: 0, max: null, measurements: [] as TransformedMeasurement[]} + ); +}; + +/** + * + * @param value value to check for chartability + * @returns whether the data point can be added to a line chart (number or null) + */ +export const isChartable = (value: any): boolean => isFiniteNumber(value) || value === null; + +type FormattedMeasurementValue = number | string | null; + +/** + * + * @param value value to display + * @returns value formatted for display purposes + */ +export const formattedValue = (value: any): FormattedMeasurementValue => { + const isNum = isFiniteNumber(value); + return isNum ? roundNumber(Number(value)) : value?.toString() ?? null; +}; + +/** + * + * @param value measurement value number (examples: 4 or 4.05) + * @returns information about displaying the measurement value + */ +const formatNumberMeasurement = (value: number): MeasurementValueInfo => { + const displayValue = formattedValue(value); + return { + canChart: true, + chartValue: displayValue, + tableValue: displayValue, + }; +}; + +/** + * + * @param value measurement value array (examples: [4] or [null] or ['anything else']) + * @param accessor key by which to access measurement value + * @returns information about displaying the measurement value + */ +export const formatSingleItemArrayMeasurement = (value: FormattedMeasurementValue[], accessor: number): MeasurementValueInfo => { + if (isFiniteNumber(accessor)) { + const measurementValue = value?.[accessor] ?? 
null; + // if it's a number or null, chart it + if (isFiniteNumber(measurementValue) || measurementValue === null) { + const displayValue = formattedValue(measurementValue); + return { + canChart: isChartable(measurementValue), + chartValue: {[accessor]: displayValue}, + tableValue: {[accessor]: displayValue}, + }; + } + // if it exists, but it's not a good format, just put it in a table + return { + canChart: false, + tableValue: {[accessor]: measurementValue.toString()}, + }; + } + return { + canChart: false, + tableValue: value.toString(), + }; +}; + +/** + * + * @param value measurement value array (examples: [4,6,3,5] or [4,6,null,5] or [4,6,'a string',5]) + * @returns information about displaying the measurement value (charts a chartable first value, shows stringified value in table)) + */ +export const formatMultiItemArrayMeasurement = (value: FormattedMeasurementValue[]): MeasurementValueInfo => { + if (value.length === 0) { + return { + canChart: false, + tableValue: '', + }; + } + + const firstMeasurementValue = value[0]; + const canChartFirstValue = isChartable(firstMeasurementValue); + return { + canChart: canChartFirstValue, + ...(canChartFirstValue && {chartValue: formattedValue(firstMeasurementValue)}), + tableValue: value.map((v) => String(v)).toString(), + }; +}; + +/** + * + * @param value measurement value object (example: { key1: 5, key2: 154, key3: 'abc' } + * @param accessors keys by which to access measurement values + * @returns information about displaying the measurement value (returns TransformedObjectValue)) + */ +export const formatKeyValueMeasurement = (value: {[key: string]: FormattedMeasurementValue}, accessors: string[]): MeasurementValueInfo => { + const transformedValue: TransformedValueObject = {}; + let canChart = true; + accessors.forEach((accessor) => { + if (accessor in value) { + const measurementValue = value[accessor]; + const displayValue = formattedValue(measurementValue); + canChart = canChart && 
isChartable(measurementValue); + transformedValue[accessor] = displayValue; + } else { + transformedValue[accessor] = null; + } + }); + return { + canChart: canChart && !Object.values(transformedValue).every((v: FormattedMeasurementValue) => v === null), + chartValue: transformedValue, + tableValue: transformedValue, + }; +}; + +/** + * + * @param conditionKeys keys from success/fail conditions used in some cases to pull values from the measurement result + * @param value measurement value returned by provider + * @returns chart and table data along with a flag indicating whether the measurement value can be charted + */ +const transformMeasurementValue = (conditionKeys: string[], value?: string): MeasurementValueInfo => { + if (value === undefined || value === '') { + return { + canChart: true, + chartValue: null, + tableValue: null, + }; + } + + const parsedValue = JSON.parse(value); + + // single number measurement value + if (isFiniteNumber(parsedValue)) { + return formatNumberMeasurement(parsedValue); + } + + // single item array measurement value + if (Array.isArray(parsedValue) && parsedValue.length > 0 && conditionKeys.length === 1) { + const accessor = parseInt(conditionKeys[0]); + return formatSingleItemArrayMeasurement(parsedValue, accessor); + } + + // multi-item array measurement value + if (Array.isArray(parsedValue) && parsedValue.length > 0) { + return formatMultiItemArrayMeasurement(parsedValue); + } + + // key / value pairs measurement value + if (typeof parsedValue === 'object' && !Array.isArray(parsedValue) && conditionKeys.length > 0) { + return formatKeyValueMeasurement(parsedValue, conditionKeys); + } + + // unsupported formats are stringified and put into table + return { + canChart: false, + tableValue: parsedValue.toString(), + }; +}; diff --git a/ui/src/app/components/analysis-modal/types.ts b/ui/src/app/components/analysis-modal/types.ts new file mode 100644 index 0000000000..5b4479a16b --- /dev/null +++ 
b/ui/src/app/components/analysis-modal/types.ts @@ -0,0 +1,70 @@ +import { + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Measurement, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Metric, + GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult, +} from '../../../models/rollout/generated'; + +export enum AnalysisStatus { + Successful = 'Successful', + Error = 'Error', + Failed = 'Failed', + Running = 'Running', + Pending = 'Pending', + Inconclusive = 'Inconclusive', + Unknown = 'Unknown', // added by frontend +} + +export enum FunctionalStatus { + ERROR = 'ERROR', + INACTIVE = 'INACTIVE', + IN_PROGRESS = 'IN_PROGRESS', + SUCCESS = 'SUCCESS', + WARNING = 'WARNING', +} + +export type TransformedMetricStatus = GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1MetricResult & { + adjustedPhase: AnalysisStatus; + chartable: boolean; + chartMax: number | null; + chartMin: number; + statusLabel: string; + substatus?: FunctionalStatus.ERROR | FunctionalStatus.WARNING; + transformedMeasurements: TransformedMeasurement[]; +}; + +export type TransformedMetricSpec = GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Metric & { + failConditionLabel: string | null; + failThresholds: number[] | null; + queries?: string[]; + successConditionLabel: string | null; + successThresholds: number[] | null; + conditionKeys: string[]; +}; + +export type TransformedMetric = { + name: string; + spec?: TransformedMetricSpec; + status: TransformedMetricStatus; +}; + +export type TransformedValueObject = { + [key: string]: number | string | null; +}; + +export type TransformedMeasurement = GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Measurement & { + chartValue?: TransformedValueObject | number | string | null; + tableValue: TransformedValueObject | number | string | null; +}; + +export type MeasurementSetInfo = { + chartable: boolean; + max: number | null; + measurements: TransformedMeasurement[]; + min: number; +}; + +export type MeasurementValueInfo = { + 
canChart: boolean; + chartValue?: TransformedValueObject | number | string | null; + tableValue: TransformedValueObject | number | string | null; +}; diff --git a/ui/src/app/components/confirm-button/confirm-button.tsx b/ui/src/app/components/confirm-button/confirm-button.tsx index 4dd4f37e7c..e10212d55c 100644 --- a/ui/src/app/components/confirm-button/confirm-button.tsx +++ b/ui/src/app/components/confirm-button/confirm-button.tsx @@ -3,7 +3,7 @@ import * as React from 'react'; import {Button, Popconfirm, Tooltip} from 'antd'; import {ButtonProps} from 'antd/es/button/button'; import {useState} from 'react'; -import { TooltipPlacement } from 'antd/es/tooltip'; +import {TooltipPlacement} from 'antd/es/tooltip'; interface ConfirmButtonProps extends ButtonProps { skipconfirm?: boolean; @@ -13,7 +13,10 @@ interface ConfirmButtonProps extends ButtonProps { export const ConfirmButton = (props: ConfirmButtonProps) => { const [open, setOpen] = useState(false); - const [buttonProps, setButtonProps] = useState(props); + + const {skipconfirm, ...coreButtonProps} = props; + + const [buttonProps, setButtonProps] = useState(coreButtonProps); React.useEffect(() => { const tmp = {...props}; @@ -51,7 +54,8 @@ export const ConfirmButton = (props: ConfirmButtonProps) => { onClick={(e) => { e.stopPropagation(); e.preventDefault(); - }}> + }} + > { okText='Yes' cancelText='No' onOpenChange={handleOpenChange} - placement={props.placement || 'bottom'}> + placement={props.placement || 'bottom'} + >
    diff --git a/ui/src/app/components/header/header.tsx b/ui/src/app/components/header/header.tsx index e267822108..8bdbfc3a83 100644 --- a/ui/src/app/components/header/header.tsx +++ b/ui/src/app/components/header/header.tsx @@ -1,6 +1,7 @@ import * as React from 'react'; import {useParams} from 'react-router'; +import {Key, KeybindingContext} from 'react-keyhooks'; import {NamespaceContext, RolloutAPIContext} from '../../shared/context/api'; import './header.scss'; @@ -11,13 +12,28 @@ import {faBook, faKeyboard} from '@fortawesome/free-solid-svg-icons'; const Logo = () => Argo Logo; -export const Header = (props: {pageHasShortcuts: boolean; changeNamespace: (val: string) => void; showHelp: () => void}) => { +export const Header = (props: {pageHasShortcuts: boolean; changeNamespace: (val: string) => void; showHelp: () => void; hideHelp: () => void}) => { const history = useHistory(); const namespaceInfo = React.useContext(NamespaceContext); const {namespace} = useParams<{namespace: string}>(); const api = React.useContext(RolloutAPIContext); const [version, setVersion] = React.useState('v?'); const [nsInput, setNsInput] = React.useState(namespaceInfo.namespace); + const {useKeybinding} = React.useContext(KeybindingContext); + + useKeybinding([Key.SHIFT, Key.H], + () => { + props.showHelp(); + return true; + }, + true + ); + + useKeybinding(Key.ESCAPE, () => { + props.hideHelp(); + return true; + }); + React.useEffect(() => { const getVersion = async () => { const v = await api.rolloutServiceVersion(); @@ -25,12 +41,14 @@ export const Header = (props: {pageHasShortcuts: boolean; changeNamespace: (val: }; getVersion(); }, []); + React.useEffect(() => { if (namespace && namespace != namespaceInfo.namespace) { props.changeNamespace(namespace); setNsInput(namespace); } }, []); + return (
    diff --git a/ui/src/app/components/info-item/info-item.scss b/ui/src/app/components/info-item/info-item.scss index 0cd040dde6..17cc0d4d4d 100644 --- a/ui/src/app/components/info-item/info-item.scss +++ b/ui/src/app/components/info-item/info-item.scss @@ -8,7 +8,7 @@ margin-right: 5px; color: $argo-color-gray-8; display: flex; - align-items: center; + align-items: left; min-width: 0; &--lightweight { diff --git a/ui/src/app/components/info-item/info-item.tsx b/ui/src/app/components/info-item/info-item.tsx index 7e7bf8e617..7928310bc1 100644 --- a/ui/src/app/components/info-item/info-item.tsx +++ b/ui/src/app/components/info-item/info-item.tsx @@ -1,6 +1,6 @@ import * as React from 'react'; import './info-item.scss'; -import { Tooltip } from 'antd'; +import {Tooltip} from 'antd'; export enum InfoItemKind { Default = 'default', @@ -40,7 +40,7 @@ export const InfoItem = (props: InfoItemProps) => { /** * Displays a right justified InfoItem (or multiple InfoItems) and a left justfied label */ -export const InfoItemRow = (props: {label: string | React.ReactNode; items?: InfoItemProps | InfoItemProps[]; lightweight?: boolean}) => { +export const InfoItemRow = (props: {label: string | React.ReactNode; items?: InfoItemProps | InfoItemProps[]; lightweight?: boolean; style?: React.CSSProperties}) => { let {label, items} = props; let itemComponents = null; if (!Array.isArray(items)) { @@ -55,7 +55,7 @@ export const InfoItemRow = (props: {label: string | React.ReactNode; items?: Inf
    )} - {props.items &&
    {itemComponents}
    } + {props.items &&
    {itemComponents}
    }
    ); }; diff --git a/ui/src/app/components/pods/pods.tsx b/ui/src/app/components/pods/pods.tsx index c3e5fecb32..5e7005fcbd 100644 --- a/ui/src/app/components/pods/pods.tsx +++ b/ui/src/app/components/pods/pods.tsx @@ -1,14 +1,16 @@ import * as React from 'react'; import * as moment from 'moment'; -import {Duration, Ticker} from 'argo-ui'; +import {DropDown, Duration} from 'argo-ui'; import {RolloutReplicaSetInfo} from '../../../models/rollout/generated'; import {ReplicaSetStatus, ReplicaSetStatusIcon} from '../status-icon/status-icon'; import './pods.scss'; -import {Dropdown, MenuProps, Tooltip} from 'antd'; +import {Tooltip} from 'antd'; + import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; import {IconDefinition, faCheck, faCircleNotch, faClipboard, faExclamationTriangle, faQuestionCircle, faTimes} from '@fortawesome/free-solid-svg-icons'; import {EllipsisMiddle} from '../ellipsis-middle/ellipsis-middle'; import {InfoItem} from '../info-item/info-item'; +import {Ticker} from '../ticker/ticker'; export enum PodStatus { Pending = 'pending', @@ -18,7 +20,17 @@ export enum PodStatus { Unknown = 'unknown', } -export const ParsePodStatus = (status: string): PodStatus => { +const isPodReady = (ready: string) => { + // Ready is a string in the format "0/1", "1/1", etc. + const [current, total] = ready.split('/'); + return current === total; +}; + +export const ParsePodStatus = (status: string, ready: string): PodStatus => { + if (status === 'Running' && !isPodReady(ready)) { + return PodStatus.Pending; + } + switch (status) { case 'Pending': case 'Terminating': @@ -48,15 +60,11 @@ export const ReplicaSets = (props: {replicaSets: RolloutReplicaSetInfo[]; showRe return (
    - {replicaSets?.map( - (rsInfo) => - rsInfo.pods && - rsInfo.pods.length > 0 && ( -
    - -
    - ) - )} + {replicaSets?.map((rsInfo) => ( +
    + +
    + ))}
    ); }; @@ -85,7 +93,7 @@ export const ReplicaSet = (props: {rs: RolloutReplicaSetInfo; showRevision?: boo Scaledown in }> - ) as any} icon='fa fa-clock'> + ) as any} icon='fa fa-clock' /> ); }} @@ -96,41 +104,30 @@ export const ReplicaSet = (props: {rs: RolloutReplicaSetInfo; showRevision?: boo )} - {props.rs.pods && props.rs.pods.length > 0 && ( -
    - {(props.rs?.pods || []).map((pod, i) => ( - -
    Status: {pod.status}
    -
    {pod.objectMeta?.name}
    -
    - } - /> - ))} - - )} +
    + {(props.rs?.pods || []).length > 0 + ? (props.rs?.pods || []).map((pod, i) => ( + +
    {pod.objectMeta?.name}
    +
    Status: {pod.status}
    +
    Ready: {pod.ready}
    +
    + } + /> + )) + : 'No Pods!'} + ); }; -const CopyMenu = (name: string): MenuProps['items'] => { - return [ - { - key: 1, - label: ( -
    navigator.clipboard.writeText(name)}> - Copy Name -
    - ), - }, - ]; -}; - -export const PodWidget = ({name, status, tooltip, customIcon}: {name: string; status: string; tooltip: React.ReactNode; customIcon?: IconDefinition}) => { +export const PodWidget = ({name, status, ready, tooltip, customIcon}: {name: string; status: string; ready: string; tooltip: React.ReactNode; customIcon?: IconDefinition}) => { let icon: IconDefinition; let spin = false; if (status.startsWith('Init:')) { @@ -144,7 +141,7 @@ export const PodWidget = ({name, status, tooltip, customIcon}: {name: string; st icon = faExclamationTriangle; } - const className = ParsePodStatus(status); + const className = ParsePodStatus(status, ready); if (customIcon) { icon = customIcon; @@ -171,12 +168,18 @@ export const PodWidget = ({name, status, tooltip, customIcon}: {name: string; st } return ( - - -
    - -
    -
    -
    + ( + +
    + +
    +
    + )}> +
    navigator.clipboard.writeText(name)}> + Copy Name +
    +
    ); }; diff --git a/ui/src/app/components/rollout-actions/rollout-actions.tsx b/ui/src/app/components/rollout-actions/rollout-actions.tsx index 94a4b289f2..fd2ff83f7d 100644 --- a/ui/src/app/components/rollout-actions/rollout-actions.tsx +++ b/ui/src/app/components/rollout-actions/rollout-actions.tsx @@ -107,7 +107,8 @@ export const RolloutActionButton = (props: {action: RolloutAction; rollout: Roll disabled={ap.disabled} loading={loading} tooltip={ap.tooltip} - icon={}> + icon={} + > {props.action} ); diff --git a/ui/src/app/components/rollouts-list/rollouts-list.scss b/ui/src/app/components/rollout-grid-widget/rollout-grid-widget.scss similarity index 98% rename from ui/src/app/components/rollouts-list/rollouts-list.scss rename to ui/src/app/components/rollout-grid-widget/rollout-grid-widget.scss index f0ec5fdcc7..68f92bf93e 100644 --- a/ui/src/app/components/rollouts-list/rollouts-list.scss +++ b/ui/src/app/components/rollout-grid-widget/rollout-grid-widget.scss @@ -171,6 +171,8 @@ $colWidth: ($WIDGET_WIDTH + (2 * $widgetPadding)) + $widgetMarginRight; align-items: center; margin-top: 1.5em; z-index: 10 !important; + color: $argo-color-gray-7; + font-size: 14px; } } -} +} \ No newline at end of file diff --git a/ui/src/app/components/rollout-grid-widget/rollout-grid-widget.tsx b/ui/src/app/components/rollout-grid-widget/rollout-grid-widget.tsx new file mode 100644 index 0000000000..503a0415ed --- /dev/null +++ b/ui/src/app/components/rollout-grid-widget/rollout-grid-widget.tsx @@ -0,0 +1,134 @@ +import * as React from 'react'; +import {Link} from 'react-router-dom'; + +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; +import {faCircleNotch, faRedoAlt} from '@fortawesome/free-solid-svg-icons'; +import {IconDefinition} from '@fortawesome/fontawesome-svg-core'; +import {faStar as faStarSolid} from '@fortawesome/free-solid-svg-icons'; +import {faStar as faStarOutline} from '@fortawesome/free-regular-svg-icons/faStar'; + +import {Tooltip} from 
'antd'; + +import {ParsePodStatus, PodStatus, ReplicaSets} from '../pods/pods'; +import {RolloutInfo} from '../../../models/rollout/rollout'; +import {useWatchRollout} from '../../shared/services/rollout'; +import {useClickOutside} from '../../shared/utils/utils'; +import {InfoItemKind, InfoItemRow} from '../info-item/info-item'; +import {RolloutAction, RolloutActionButton} from '../rollout-actions/rollout-actions'; +import {RolloutStatus, StatusIcon} from '../status-icon/status-icon'; +import './rollout-grid-widget.scss'; + +export const isInProgress = (rollout: RolloutInfo): boolean => { + for (const rs of rollout.replicaSets || []) { + for (const p of rs.pods || []) { + const status = ParsePodStatus(p.status, p.ready); + if (status === PodStatus.Pending) { + return true; + } + } + } + return false; +}; + +export const RolloutGridWidget = (props: { + rollout: RolloutInfo; + deselect: () => void; + selected?: boolean; + isFavorite: boolean; + onFavoriteChange: (rolloutName: string, isFavorite: boolean) => void; +}) => { + const [watching, subscribe] = React.useState(false); + let rollout = props.rollout; + useWatchRollout(props.rollout?.objectMeta?.name, watching, null, (r: RolloutInfo) => (rollout = r)); + const ref = React.useRef(null); + useClickOutside(ref, props.deselect); + + React.useEffect(() => { + if (watching) { + const to = setTimeout(() => { + if (!isInProgress(rollout)) { + subscribe(false); + } + }, 5000); + return () => clearTimeout(to); + } + }, [watching, rollout]); + + return ( + + { + subscribe(true); + setTimeout(() => { + subscribe(false); + }, 1000); + }} + isFavorite={props.isFavorite} + handleFavoriteChange={props.onFavoriteChange} + /> +
    + + {(rollout.strategy || '').toLocaleLowerCase() === 'canary' && } +
    + +
    {rollout.message !== 'CanaryPauseStep' && rollout.message}
    +
    + subscribe(true)} indicateLoading /> + subscribe(true)} indicateLoading /> +
    + + ); +}; + +const WidgetHeader = (props: {rollout: RolloutInfo; refresh: () => void; isFavorite: boolean; handleFavoriteChange: (rolloutName: string, isFavorite: boolean) => void}) => { + const {rollout} = props; + const [loading, setLoading] = React.useState(false); + React.useEffect(() => { + setTimeout(() => setLoading(false), 500); + }, [loading]); + + const handleFavoriteClick = (e: React.MouseEvent) => { + e.stopPropagation(); + e.preventDefault(); + props.handleFavoriteChange(rollout.objectMeta?.name, !props.isFavorite); + }; + + return ( +
    + {props.isFavorite ? ( + + ) : ( + + )} + {rollout.objectMeta?.name} + + + { + props.refresh(); + setLoading(true); + e.preventDefault(); + }} + /> + + + +
    + ); +}; diff --git a/ui/src/app/components/rollout/containers.tsx b/ui/src/app/components/rollout/containers.tsx index c69b30658b..73eddc5139 100644 --- a/ui/src/app/components/rollout/containers.tsx +++ b/ui/src/app/components/rollout/containers.tsx @@ -9,6 +9,7 @@ import {faExclamationCircle, faPencilAlt, faSave, faTimes} from '@fortawesome/fr interface ContainersWidgetProps { containers: RolloutContainerInfo[]; images: ImageInfo[]; + name: string; interactive?: { editState: ReactStatePair; setImage: (container: string, image: string, tag: string) => void; @@ -16,7 +17,7 @@ interface ContainersWidgetProps { } export const ContainersWidget = (props: ContainersWidgetProps) => { - const {containers, images, interactive} = props; + const {containers, images, name, interactive} = props; const [editing, setEditing] = interactive?.editState || [null, null]; const inputMap: {[key: string]: string} = {}; for (const container of containers) { @@ -29,7 +30,7 @@ export const ContainersWidget = (props: ContainersWidgetProps) => {
    - Containers + {name}
    {interactive && @@ -63,7 +64,8 @@ export const ContainersWidget = (props: ContainersWidgetProps) => { setError(true); } } - }}> + }} + > {error ? 'ERROR' : 'SAVE'}
    diff --git a/ui/src/app/components/rollout/revision.tsx b/ui/src/app/components/rollout/revision.tsx index e2fcd11526..62f31a0e76 100644 --- a/ui/src/app/components/rollout/revision.tsx +++ b/ui/src/app/components/rollout/revision.tsx @@ -1,16 +1,22 @@ import * as React from 'react'; +import * as moment from 'moment'; import {RolloutAnalysisRunInfo, RolloutExperimentInfo, RolloutReplicaSetInfo} from '../../../models/rollout/generated'; import {IconForTag} from '../../shared/utils/utils'; -import {PodWidget, ReplicaSets} from '../pods/pods'; -import {ImageInfo, parseImages} from './rollout'; +import {ReplicaSets} from '../pods/pods'; +import {ImageInfo, parseImages, parseInitContainerImages} from './rollout'; import './rollout.scss'; import '../pods/pods.scss'; import {ConfirmButton} from '../confirm-button/confirm-button'; import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; -import {faChartBar, faChevronCircleDown, faChevronCircleUp, faUndoAlt} from '@fortawesome/free-solid-svg-icons'; -import {Button, Tooltip} from 'antd'; -import moment = require('moment'); +import {faChevronCircleDown, faChevronCircleUp, faUndoAlt} from '@fortawesome/free-solid-svg-icons'; +import {Button, Space, Tooltip, Typography} from 'antd'; import {InfoItemProps, InfoItemRow} from '../info-item/info-item'; +import {AnalysisModal} from '../analysis-modal/analysis-modal'; +import StatusIndicator from '../analysis-modal/status-indicator/status-indicator'; +import {AnalysisStatus} from '../analysis-modal/types'; +import {getAdjustedMetricPhase} from '../analysis-modal/transforms'; + +const {Text} = Typography; function formatTimestamp(ts: string): string { const inputFormat = 'YYYY-MM-DD HH:mm:ss Z z'; @@ -49,23 +55,24 @@ interface RevisionWidgetProps { initCollapsed?: boolean; rollback?: (revision: number) => void; current: boolean; - message: String; } -export const RevisionWidget = (props: RevisionWidgetProps) => { - const {revision, initCollapsed} = props; +export const 
RevisionWidget = ({current, initCollapsed, revision, rollback}: RevisionWidgetProps) => { const [collapsed, setCollapsed] = React.useState(initCollapsed); const icon = collapsed ? faChevronCircleDown : faChevronCircleUp; - const images = parseImages(revision.replicaSets); + const images = parseImages(revision.replicaSets ?? []); + const initContainerImages = parseInitContainerImages(revision.replicaSets ?? []); + const combinedImages = images.concat(initContainerImages); const hasPods = (revision.replicaSets || []).some((rs) => rs.pods?.length > 0); + return (
    Revision {revision.number}
    - {!props.current && props.rollback && ( + {!current && rollback && ( props.rollback(Number(revision.number))} + onClick={() => rollback(Number(revision.number))} type='default' icon={} style={{fontSize: '13px', marginRight: '10px'}}> @@ -76,18 +83,16 @@ export const RevisionWidget = (props: RevisionWidgetProps) => {
    - +
    {!collapsed && ( {(revision.analysisRuns || []).length > 0 && ( - -
    - -
    -
    +
    + +
    )}
    )} @@ -95,181 +100,67 @@ export const RevisionWidget = (props: RevisionWidgetProps) => { ); }; -const AnalysisRunWidget = (props: {analysisRuns: RolloutAnalysisRunInfo[]}) => { - const {analysisRuns} = props; - const [selection, setSelection] = React.useState(null); +const analysisName = (ar: RolloutAnalysisRunInfo): string => { + const temp = ar.objectMeta?.name?.split('-') ?? ''; + const len = temp.length; + return len < 2 ? 'Analysis' : `Analysis ${temp[len - 2] + '-' + temp[len - 1]}`; +}; + +interface AnalysisRunWidgetProps { + analysisRuns: RolloutAnalysisRunInfo[]; + images: ImageInfo[]; + revision: string; +} + +const AnalysisRunWidget = ({analysisRuns, images, revision}: AnalysisRunWidgetProps) => { + const [selectedAnalysis, setSelectedAnalysis] = React.useState(null); + const imageNames = images.map((img) => img.image); return (
    Analysis Runs
    - {analysisRuns.map((ar) => { - let temp = ar.objectMeta.name.split('-'); - let len = temp.length; - return ( - -
    - Name: {ar.objectMeta.name} -
    -
    - Created at: - {formatTimestamp(JSON.stringify(ar.objectMeta?.creationTimestamp))} -
    -
    - Status: - {ar.status} -
    - - }> -
    - -
    -
    - ); - })} -
    - - {selection && ( - -
    - {selection.objectMeta?.name} - -
    - {selection?.jobs && ( -
    -
    - {selection.jobs.map((job) => { - return ( - -
    job-name: {job.objectMeta?.name}
    -
    StartedAt: {formatTimestamp(JSON.stringify(job.startedAt))}
    -
    Status: {job.status}
    -
    MetricName: {job.metricName}
    -
    - } - customIcon={faChartBar} - /> - ); - })} -
    - metric.name === selection.jobs[0].metricName) - .map((metric) => { - return ( - - {metric?.name && ( -
    - MetricName: {metric.name} -
    - )} - {metric?.successCondition && ( -
    - SuccessCondition: - {metric.successCondition} -
    - )} - {metric?.failureLimit && ( -
    - FailureLimit: {metric.failureLimit} -
    - )} - {metric?.inconclusiveLimit && ( -
    - InconclusiveLimit: - {metric.inconclusiveLimit} -
    - )} - {metric?.count && ( -
    - Count: - {metric.count} -
    - )} -
    - ); - })}> - -
    -
    - )} - {selection?.nonJobInfo && ( -
    -
    - {selection.nonJobInfo.map((nonJob) => { - return ( - -
    Value: {JSON.stringify(JSON.parse(nonJob.value), null, 2)}
    -
    StartedAt: {formatTimestamp(JSON.stringify(nonJob.startedAt))}
    -
    Status: {nonJob.status}
    -
    MetricName: {nonJob.metricName}
    -
    - } - customIcon={faChartBar} - /> - ); - })} -
    - metric.name === selection.nonJobInfo[0].metricName) - .map((metric) => { - return ( - - {metric?.name && ( -
    - MetricName: {metric.name} -
    - )} - {metric?.successCondition && ( -
    - SuccessCondition: - {metric.successCondition} -
    - )} - {metric?.failureLimit && ( -
    - FailureLimit: {metric.failureLimit} -
    - )} - {metric?.inconclusiveLimit && ( -
    - InconclusiveLimit: - {metric.inconclusiveLimit} -
    - )} - {metric?.count && ( -
    - Count: - {metric.count} -
    - )} -
    - ); - })}> - -
    + {analysisRuns.map((ar) => ( + +
    + Name: {ar.objectMeta.name} +
    +
    + Created at: + {formatTimestamp(JSON.stringify(ar.objectMeta?.creationTimestamp))} +
    +
    + Status: + {ar.status} +
    + + }> +
    +
    - )} - +
    + ))} +
    + {selectedAnalysis !== null && ( + ar.objectMeta.name === selectedAnalysis.objectMeta.name)} + analysisName={analysisName(selectedAnalysis)} + images={imageNames} + revision={revision} + open={selectedAnalysis !== null} + onClose={() => setSelectedAnalysis(null)} + /> )} ); diff --git a/ui/src/app/components/rollout/rollout.tsx b/ui/src/app/components/rollout/rollout.tsx index 91b6c0a9d8..dfecde7e84 100644 --- a/ui/src/app/components/rollout/rollout.tsx +++ b/ui/src/app/components/rollout/rollout.tsx @@ -48,35 +48,23 @@ export const parseImages = (replicaSets: RolloutReplicaSetInfo[]): ImageInfo[] = const unknownImages: {[key: string]: boolean} = {}; (replicaSets || []).forEach((rs) => { (rs.images || []).forEach((img) => { - const tags: ImageTag[] = []; - - if (rs.canary) { - tags.push(ImageTag.Canary); - } - if (rs.stable) { - tags.push(ImageTag.Stable); - } - if (rs.active) { - tags.push(ImageTag.Active); - } - if (rs.preview) { - tags.push(ImageTag.Preview); - } + updateImageInfo(rs,img,images,unknownImages); + }); + }); - if (images[img]) { - images[img].tags = [...tags, ...images[img].tags]; - } else { - images[img] = { - image: img, - tags: tags, - }; - } + const imgArray = Object.values(images); + imgArray.sort((a, b) => { + return unknownImages[a.image] ? 
1 : -1; + }); + return imgArray; +}; - if (images[img].tags.length === 0) { - unknownImages[img] = true; - } else { - unknownImages[img] = false; - } +export const parseInitContainerImages = (replicaSets: RolloutReplicaSetInfo[]): ImageInfo[] => { + const images: {[key: string]: ImageInfo} = {}; + const unknownImages: {[key: string]: boolean} = {}; + (replicaSets || []).forEach((rs) => { + (rs.initContainerImages || []).forEach((img) => { + updateImageInfo(rs,img,images,unknownImages); }); }); @@ -87,6 +75,38 @@ export const parseImages = (replicaSets: RolloutReplicaSetInfo[]): ImageInfo[] = return imgArray; }; +const updateImageInfo = (rs: RolloutReplicaSetInfo,img: string ,images: {[key: string]: ImageInfo},unknownImages:{[key: string]: boolean}) => { + const tags: ImageTag[] = []; + + if (rs.canary) { + tags.push(ImageTag.Canary); + } + if (rs.stable) { + tags.push(ImageTag.Stable); + } + if (rs.active) { + tags.push(ImageTag.Active); + } + if (rs.preview) { + tags.push(ImageTag.Preview); + } + + if (images[img]) { + images[img].tags = [...tags, ...images[img].tags]; + } else { + images[img] = { + image: img, + tags: tags, + }; + } + + if (images[img].tags.length === 0) { + unknownImages[img] = true; + } else { + unknownImages[img] = false; + } +}; + export type ReactStatePair = [boolean, React.Dispatch>]; export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?: {editState: ReactStatePair; api: RolloutServiceApi; namespace: string}}) => { @@ -94,8 +114,10 @@ export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?: const curStep = parseInt(rollout.step, 10) || (rollout.steps || []).length; const revisions = ProcessRevisions(rollout); - const images = parseImages(rollout?.replicaSets || []); + const initContainerEditState = React.useState(false); + const initContainerImages = parseInitContainerImages(rollout?.replicaSets || []); + const images = parseImages(rollout?.replicaSets || []); for (const img of images) { for 
(const container of rollout.containers || []) { if (img.image === container.image) { @@ -132,6 +154,7 @@ export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?:
    + {rollout.initContainers &&
    + { + interactive.api.rolloutServiceSetRolloutImage({}, interactive.namespace, rollout.objectMeta?.name, container, image, tag); + }, + } + : null + } + /> +
    }
    @@ -159,7 +199,6 @@ export const RolloutWidget = (props: {rollout: RolloutRolloutInfo; interactive?: initCollapsed={false} rollback={interactive ? (r) => interactive.api.rolloutServiceUndoRollout({}, interactive.namespace, rollout.objectMeta.name, `${r}`) : null} current={i === 0} - message={rollout.message} /> ))}
    @@ -332,7 +371,8 @@ const Step = (props: {step: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1 (props.step.setMirrorRoute && openMirror) ? 'steps__step-title--experiment' : '' - }`}> + }`} + > {icon && } {content} {unit} {props.step.setCanaryScale && ( @@ -457,7 +497,7 @@ const WidgetItemSetMirror = ({value}: {value: GithubComArgoprojArgoRolloutsPkgAp {index} - Path ({stringMatcherType})
    {stringMatcherValue}
    - + , ); } if (val.method != null) { @@ -479,7 +519,7 @@ const WidgetItemSetMirror = ({value}: {value: GithubComArgoprojArgoRolloutsPkgAp {index} - Method ({stringMatcherType})
    {stringMatcherValue}
    - + , ); } return fragments; diff --git a/ui/src/app/components/rollouts-grid/rollouts-grid.scss b/ui/src/app/components/rollouts-grid/rollouts-grid.scss new file mode 100644 index 0000000000..4f6e1dc22f --- /dev/null +++ b/ui/src/app/components/rollouts-grid/rollouts-grid.scss @@ -0,0 +1,8 @@ +@import 'node_modules/argo-ui/v2/styles/colors'; + +.rollouts-grid { + display: flex; + box-sizing: border-box; + flex-wrap: wrap; + padding-top: 20px; +} diff --git a/ui/src/app/components/rollouts-grid/rollouts-grid.tsx b/ui/src/app/components/rollouts-grid/rollouts-grid.tsx new file mode 100644 index 0000000000..57883c6586 --- /dev/null +++ b/ui/src/app/components/rollouts-grid/rollouts-grid.tsx @@ -0,0 +1,99 @@ +import * as React from 'react'; +import {useHistory} from 'react-router-dom'; +import {Key, KeybindingContext, useNav} from 'react-keyhooks'; +import {RolloutInfo} from '../../../models/rollout/rollout'; +import {RolloutGridWidget} from '../rollout-grid-widget/rollout-grid-widget'; +import './rollouts-grid.scss'; + +export const RolloutsGrid = ({ + rollouts, + onFavoriteChange, + favorites, +}: { + rollouts: RolloutInfo[]; + onFavoriteChange: (rolloutName: string, isFavorite: boolean) => void; + favorites: {[key: string]: boolean}; +}) => { + const [itemsPerRow, setItemsPerRow] = React.useState(0); + const rolloutsGridRef = React.useRef(null); + + const handleFavoriteChange = (rolloutName: string, isFavorite: boolean) => { + onFavoriteChange(rolloutName, isFavorite); + }; + + const orderedRollouts = rollouts + .map((rollout) => { + return { + ...rollout, + key: rollout.objectMeta?.uid, + favorite: favorites[rollout.objectMeta?.name] || false, + }; + }) + .sort((a, b) => { + if (a.favorite && !b.favorite) { + return -1; + } else if (!a.favorite && b.favorite) { + return 1; + } else { + return 0; + } + }); + + // Calculate the number of items per row for keyboard navigation + React.useEffect(() => { + const rolloutsGrid = rolloutsGridRef.current; + + const 
updateItemsPerRow = () => { + if (rolloutsGrid) { + const rolloutsListWidget = document.querySelector('.rollouts-list__widget'); + if (!rolloutsListWidget) { + return; + } + const containerWidth = rolloutsGrid.clientWidth; + const widgetWidth = parseInt(getComputedStyle(rolloutsListWidget).getPropertyValue('width'), 10); + const widgetPadding = parseInt(getComputedStyle(rolloutsListWidget).getPropertyValue('padding'), 10); + const itemsPerRowValue = Math.floor(containerWidth / (widgetWidth + widgetPadding * 2)); + setItemsPerRow(itemsPerRowValue); + } + }; + + updateItemsPerRow(); + + window.addEventListener('resize', updateItemsPerRow); + + return () => { + window.removeEventListener('resize', updateItemsPerRow); + }; + }, []); + + const history = useHistory(); + const [pos, nav, reset] = useNav(orderedRollouts.length); + const {useKeybinding} = React.useContext(KeybindingContext); + + useKeybinding(Key.RIGHT, () => nav(1)); + useKeybinding(Key.LEFT, () => nav(-1)); + useKeybinding(Key.UP, () => nav(-itemsPerRow)); + useKeybinding(Key.DOWN, () => nav(itemsPerRow)); + useKeybinding(Key.ENTER, () => { + if (pos !== undefined) { + history.push(`/rollout/${orderedRollouts[pos].objectMeta?.name}`); + return true; + } + return false; + }); + + return ( +
    + {orderedRollouts.map((rollout, i) => ( + reset()} + isFavorite={favorites[rollout.objectMeta?.name] || false} + onFavoriteChange={handleFavoriteChange} + /> + ))} +
    + ); +}; diff --git a/ui/src/app/components/rollouts-home/rollouts-home.scss b/ui/src/app/components/rollouts-home/rollouts-home.scss new file mode 100644 index 0000000000..575c8350f6 --- /dev/null +++ b/ui/src/app/components/rollouts-home/rollouts-home.scss @@ -0,0 +1,182 @@ +@import 'node_modules/argo-ui/v2/styles/colors'; + +$WIDGET_WIDTH: 400px; + +$widgetPadding: 17px; +$widgetMarginRight: 20px; +$colWidth: ($WIDGET_WIDTH + (2 * $widgetPadding)) + $widgetMarginRight; + +.rollouts-home { + height: 100vh; + display: flex; + flex-direction: column; +} + +.rollouts-list { + display: flex; + box-sizing: border-box; + flex-wrap: wrap; + + &__search-container { + width: 50% !important; + margin: 0 auto; + } + + &__search { + width: 100%; + font-size: 15px; + } + + &__rollouts-container { + padding: 20px; + display: flex; + flex-wrap: wrap; + + width: 3 * $colWidth; + margin: 0 auto; + + @media screen and (max-width: (3 * $colWidth)) { + width: 2 * $colWidth; + margin: 0 auto; + } + + @media screen and (max-width: (2 * $colWidth)) { + width: $colWidth; + + .rollouts-list__widget { + margin: 0 inherit; + width: 100%; + } + + .rollouts-list__search-container { + width: 100% !important; + } + } + } + + &__empty-message { + padding-top: 70px; + width: 50%; + margin: 0 auto; + color: $argo-color-gray-7; + h1 { + margin-bottom: 1em; + text-align: center; + } + div { + line-height: 1.5em; + } + pre { + overflow: scroll; + cursor: pointer; + line-height: 2em; + font-size: 15px; + padding: 3px 5px; + color: $argo-color-gray-8; + margin: 0.5em 0; + background-color: white; + } + a { + color: $sea; + border-bottom: 1px solid $sea; + } + + &--dark { + color: $shine; + a { + color: $sky; + border-color: $sky; + } + pre { + background-color: $space; + color: $shine; + } + } + + @media screen and (max-width: (2 * $colWidth)) { + width: 80%; + } + } + + &__toolbar { + width: 100%; + padding: 1em 0; + background-color: white; + border-bottom: 1px solid white; + + &--dark { + 
border-bottom: 1px solid $silver-lining; + background-color: $space; + } + } + + &__widget { + position: relative; + box-sizing: border-box; + padding: 17px; + font-size: 14px; + margin: 0 10px; + color: $argo-color-gray-7; + width: $WIDGET_WIDTH; + height: max-content; + flex-shrink: 0; + margin-bottom: 1.5em; + border-radius: 5px; + background-color: white; + box-shadow: 1px 2px 2px rgba(0, 0, 0, 0.05); + border: 1px solid $argo-color-gray-4; + z-index: 0; + + &:hover, + &--selected { + border-color: $argo-running-color; + } + + &__pods { + margin-bottom: 1em; + } + + &--dark { + color: $dull-shine; + border-color: $silver-lining; + box-shadow: 1px 2px 3px 1px $space; + background: none; + } + + &__refresh { + &:hover { + color: $argo-running-color; + } + } + + &__body { + margin-bottom: 1em; + padding-bottom: 0.75em; + border-bottom: 1px solid $argo-color-gray-4; + &--dark { + border-bottom: 1px solid $silver-lining; + } + } + + header { + color: $argo-color-gray-8; + display: flex; + align-items: center; + font-weight: 600; + font-size: 20px; + margin-bottom: 1em; + } + + &--dark header { + color: $shine; + border-bottom: 1px solid $silver-lining; + } + &__actions { + position: relative; + display: flex; + align-items: center; + margin-top: 1.5em; + z-index: 10 !important; + } + } +} diff --git a/ui/src/app/components/rollouts-home/rollouts-home.tsx b/ui/src/app/components/rollouts-home/rollouts-home.tsx new file mode 100644 index 0000000000..bd70caa2d6 --- /dev/null +++ b/ui/src/app/components/rollouts-home/rollouts-home.tsx @@ -0,0 +1,188 @@ +import * as React from 'react'; + +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; +import {faCircleNotch} from '@fortawesome/free-solid-svg-icons'; + +import {NamespaceContext} from '../../shared/context/api'; +import {useWatchRollouts} from '../../shared/services/rollout'; +import {RolloutsToolbar, defaultDisplayMode, Filters} from '../rollouts-toolbar/rollouts-toolbar'; +import {RolloutsTable} from 
'../rollouts-table/rollouts-table'; +import {RolloutsGrid} from '../rollouts-grid/rollouts-grid'; +import './rollouts-home.scss'; + +export const RolloutsHome = () => { + const rolloutsList = useWatchRollouts(); + const rollouts = rolloutsList.items; + const loading = rolloutsList.loading; + const namespaceCtx = React.useContext(NamespaceContext); + + const [filters, setFilters] = React.useState({ + showRequiresAttention: false, + showFavorites: false, + name: '', + displayMode: defaultDisplayMode, + status: { + progressing: false, + degraded: false, + paused: false, + healthy: false, + }, + }); + + const handleFilterChange = (newFilters: Filters) => { + setFilters(newFilters); + }; + + const [favorites, setFavorites] = React.useState(() => { + const favoritesStr = localStorage.getItem('rolloutsFavorites'); + return favoritesStr ? JSON.parse(favoritesStr) : {}; + }); + + const handleFavoriteChange = (rolloutName: string, isFavorite: boolean) => { + const newFavorites = {...favorites}; + if (isFavorite) { + newFavorites[rolloutName] = true; + } else { + delete newFavorites[rolloutName]; + } + setFavorites(newFavorites); + localStorage.setItem('rolloutsFavorites', JSON.stringify(newFavorites)); + }; + + const filteredRollouts = React.useMemo(() => { + return rollouts.filter((r) => { + // If no filters are set, show all rollouts + if (filters.name === '' && !filters.showFavorites && !filters.showRequiresAttention && !Object.values(filters.status).some((value) => value === true)) { + return true; + } + + const statusFiltersSet = Object.values(filters.status).some((value) => value === true); + const nameFilterSet = filters.name !== ''; + + let favoritesMatches = false; + let requiresAttentionMatches = false; + let statusMatches = false; + let nameMatches = false; + + if (filters.showFavorites && favorites[r.objectMeta.name]) { + favoritesMatches = true; + } + if (filters.showRequiresAttention && (r.status === 'Unknown' || r.status === 'Degraded' || (r.status === 
'Paused' && r.message !== 'CanaryPauseStep'))) { + requiresAttentionMatches = true; + } + if (statusFiltersSet && filters.status[r.status]) { + statusMatches = true; + } + + for (let term of filters.name.split(',').map((t) => t.trim())) { + if (term === '') continue; // Skip empty terms + + if (term.includes(':')) { + // Filter by label + const [key, value] = term.split(':'); + if (value.startsWith('"') && value.endsWith('"')) { + const exactValue = value.substring(1, value.length - 1); + if (r.objectMeta.labels && r.objectMeta.labels[key] && r.objectMeta.labels[key] === exactValue) { + nameMatches = true; + break; + } + } else if (r.objectMeta.labels && r.objectMeta.labels[key] && r.objectMeta.labels[key].includes(value)) { + nameMatches = true; + break; + } + } else { + // Filter by name + const isNegated = term.startsWith('!'); + term = term.replace(/^!/, ''); + + const isExact = term.startsWith('"') && term.endsWith('"'); + term = term.replace(/"/g, ''); + + if (isExact) { + if (isNegated) { + if (r.objectMeta.name !== term) { + nameMatches = true; + continue; + } + } else { + if (r.objectMeta.name === term) { + nameMatches = true; + break; + } + } + } else { + if (isNegated) { + if (!r.objectMeta.name.includes(term)) { + nameMatches = true; + break; + } + } else { + if (r.objectMeta.name.includes(term)) { + nameMatches = true; + break; + } + } + } + } + } + + return ( + (!nameFilterSet || nameMatches) && + (!filters.showFavorites || favoritesMatches) && + (!filters.showRequiresAttention || requiresAttentionMatches) && + (!statusFiltersSet || statusMatches) + ); + }); + }, [rollouts, filters, favorites]); + + return ( +
    + +
    + {loading ? ( +
    + + Loading... +
    + ) : (rollouts || []).length > 0 ? ( + + {filters.displayMode === 'table' && } + {filters.displayMode !== 'table' && } + + ) : ( + + )} +
    +
    + ); +}; + +const EmptyMessage = (props: {namespace: string}) => { + const CodeLine = (props: {children: string}) => { + return ( +
     navigator.clipboard.writeText(props.children)}
    +            onKeyDown={() => navigator.clipboard.writeText(props.children)}
    +            >{props.children}
    ); + }; + return ( +
    +

    No Rollouts to display!

    +
    +
    Make sure you are running the API server in the correct namespace. Your current namespace is:
    +
    + {props.namespace} +
    +
    +
    + To create a new Rollout and Service, run + kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-rollouts/master/docs/getting-started/basic/rollout.yaml + kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-rollouts/master/docs/getting-started/basic/service.yaml + or follow the{' '} + + Getting Started guide + + . +
    +
    + ); +}; diff --git a/ui/src/app/components/rollouts-list/rollouts-list.tsx b/ui/src/app/components/rollouts-list/rollouts-list.tsx deleted file mode 100644 index e8c9a4dc5f..0000000000 --- a/ui/src/app/components/rollouts-list/rollouts-list.tsx +++ /dev/null @@ -1,258 +0,0 @@ -import * as React from 'react'; -import {Key, KeybindingContext, useNav} from 'react-keyhooks'; -import {Link, useHistory} from 'react-router-dom'; -import {RolloutInfo} from '../../../models/rollout/rollout'; -import {NamespaceContext} from '../../shared/context/api'; -import {useWatchRollout, useWatchRollouts} from '../../shared/services/rollout'; -import {useClickOutside} from '../../shared/utils/utils'; -import {ParsePodStatus, PodStatus, ReplicaSets} from '../pods/pods'; -import {RolloutAction, RolloutActionButton} from '../rollout-actions/rollout-actions'; -import {RolloutStatus, StatusIcon} from '../status-icon/status-icon'; -import './rollouts-list.scss'; -import {AutoComplete, Tooltip} from 'antd'; -import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; -import {faCircleNotch, faRedoAlt} from '@fortawesome/free-solid-svg-icons'; -import {InfoItemKind, InfoItemRow} from '../info-item/info-item'; - -const useRolloutNames = (rollouts: RolloutInfo[]) => { - const parseNames = (rl: RolloutInfo[]) => - (rl || []).map((r) => { - const name = r.objectMeta?.name || ''; - return { - label: name, - value: name, - }; - }); - - const [rolloutNames, setRolloutNames] = React.useState(parseNames(rollouts)); - React.useEffect(() => { - setRolloutNames(parseNames(rollouts)); - }, [rollouts]); - - return rolloutNames; -}; - -export const RolloutsList = () => { - const rolloutsList = useWatchRollouts(); - const rollouts = rolloutsList.items; - const loading = rolloutsList.loading; - const [filteredRollouts, setFilteredRollouts] = React.useState(rollouts); - const [pos, nav, reset] = useNav(filteredRollouts.length); - const [searchString, setSearchString] = React.useState(''); - const 
searchParam = new URLSearchParams(window.location.search).get('q'); - React.useEffect(() => { - if (searchParam && searchParam != searchString) { - setSearchString(searchParam); - } - }, []); - - const searchRef = React.useRef(null); - - React.useEffect(() => { - if (searchRef.current) { - // or, if Input component in your ref, then use input property like: - // searchRef.current.input.focus(); - searchRef.current.focus(); - } - }, [searchRef]); - - const {useKeybinding} = React.useContext(KeybindingContext); - - useKeybinding(Key.RIGHT, () => nav(1)); - useKeybinding(Key.LEFT, () => nav(-1)); - useKeybinding(Key.ESCAPE, () => { - reset(); - if (searchString && searchString !== '') { - setSearchString(''); - return true; - } else { - return false; - } - }); - - const rolloutNames = useRolloutNames(rollouts); - const history = useHistory(); - - useKeybinding(Key.SLASH, () => { - if (!searchString) { - if (searchRef) { - searchRef.current.focus(); - } - return true; - } - return false; - }); - - useKeybinding(Key.ENTER, () => { - if (pos > -1) { - history.push(`/rollout/${filteredRollouts[pos].objectMeta?.name}`); - return true; - } - return false; - }); - - React.useEffect(() => { - const filtered = (rollouts || []).filter((r) => (r.objectMeta?.name || '').includes(searchString)); - if ((filtered || []).length > 0) { - setFilteredRollouts(filtered); - } - if (searchString) { - history.replace(`/${namespaceCtx.namespace}?q=${searchString}`); - } else { - history.replace(`/${namespaceCtx.namespace}`); - } - }, [searchString, rollouts]); - - const namespaceCtx = React.useContext(NamespaceContext); - - return ( -
    - {loading ? ( -
    - - Loading... -
    - ) : (rollouts || []).length > 0 ? ( - -
    -
    - history.push(`/rollout/${namespaceCtx.namespace}/${val}`)} - options={rolloutNames} - onChange={(val) => setSearchString(val)} - value={searchString} - ref={searchRef} - /> -
    -
    -
    - {(filteredRollouts.sort((a, b) => (a.objectMeta.name < b.objectMeta.name ? -1 : 1)) || []).map((rollout, i) => ( - reset()} /> - ))} -
    -
    - ) : ( - - )} -
    - ); -}; - -const EmptyMessage = (props: {namespace: string}) => { - const CodeLine = (props: {children: string}) => { - return
     navigator.clipboard.writeText(props.children)}>{props.children}
    ; - }; - return ( -
    -

    No Rollouts to display!

    -
    -
    Make sure you are running the API server in the correct namespace. Your current namespace is:
    -
    - {props.namespace} -
    -
    -
    - To create a new Rollout and Service, run - kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-rollouts/master/docs/getting-started/basic/rollout.yaml - kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-rollouts/master/docs/getting-started/basic/service.yaml - or follow the{' '} - - Getting Started guide - - . -
    -
    - ); -}; - -export const isInProgress = (rollout: RolloutInfo): boolean => { - for (const rs of rollout.replicaSets || []) { - for (const p of rs.pods || []) { - const status = ParsePodStatus(p.status); - if (status === PodStatus.Pending) { - return true; - } - } - } - return false; -}; - -export const RolloutWidget = (props: {rollout: RolloutInfo; deselect: () => void; selected?: boolean}) => { - const [watching, subscribe] = React.useState(false); - let rollout = props.rollout; - useWatchRollout(props.rollout?.objectMeta?.name, watching, null, (r: RolloutInfo) => (rollout = r)); - const ref = React.useRef(null); - useClickOutside(ref, props.deselect); - - React.useEffect(() => { - if (watching) { - const to = setTimeout(() => { - if (!isInProgress(rollout)) { - subscribe(false); - } - }, 5000); - return () => clearTimeout(to); - } - }, [watching, rollout]); - - return ( - - { - subscribe(true); - setTimeout(() => { - subscribe(false); - }, 1000); - }} - /> -
    - - {(rollout.strategy || '').toLocaleLowerCase() === 'canary' && } -
    - {(rollout.replicaSets || []).length < 1 && } - -
    - subscribe(true)} indicateLoading /> - subscribe(true)} indicateLoading /> -
    - - ); -}; - -const WidgetHeader = (props: {rollout: RolloutInfo; refresh: () => void}) => { - const {rollout} = props; - const [loading, setLoading] = React.useState(false); - React.useEffect(() => { - setTimeout(() => setLoading(false), 500); - }, [loading]); - return ( -
    - {rollout.objectMeta?.name} - - - { - props.refresh(); - setLoading(true); - e.preventDefault(); - }} - /> - - - -
    - ); -}; diff --git a/ui/src/app/components/rollouts-table/rollouts-table.scss b/ui/src/app/components/rollouts-table/rollouts-table.scss new file mode 100644 index 0000000000..bd93df799a --- /dev/null +++ b/ui/src/app/components/rollouts-table/rollouts-table.scss @@ -0,0 +1,18 @@ +@import 'node_modules/argo-ui/v2/styles/colors'; + +.rollouts-table { + width: 100%; +} + +.rollouts-table__row__selected { + background-color: $argo-color-gray-2; + } + +.rollouts-table_widget_actions { + display: flex; + flex-wrap: wrap; +} + +.rollouts-table_widget_actions_button { + margin-top: 10px; +} diff --git a/ui/src/app/components/rollouts-table/rollouts-table.tsx b/ui/src/app/components/rollouts-table/rollouts-table.tsx new file mode 100644 index 0000000000..38124f1e60 --- /dev/null +++ b/ui/src/app/components/rollouts-table/rollouts-table.tsx @@ -0,0 +1,281 @@ +import * as React from 'react'; +import {useHistory} from 'react-router-dom'; +import {Tooltip, Table, TablePaginationConfig} from 'antd'; +import {Key, KeybindingContext} from 'react-keyhooks'; +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; +import {IconDefinition} from '@fortawesome/fontawesome-svg-core'; +import {faStar as faStarSolid} from '@fortawesome/free-solid-svg-icons'; +import {faStar as faStarOutline} from '@fortawesome/free-regular-svg-icons/faStar'; + +import {RolloutAction, RolloutActionButton} from '../rollout-actions/rollout-actions'; +import {RolloutStatus, StatusIcon} from '../status-icon/status-icon'; +import {ReplicaSetStatus, ReplicaSetStatusIcon} from '../status-icon/status-icon'; +import {RolloutInfo} from '../../../models/rollout/rollout'; +import {InfoItemKind, InfoItemRow} from '../info-item/info-item'; +import { AlignType } from 'rc-table/lib/interface'; +import './rollouts-table.scss'; + +export const RolloutsTable = ({ + rollouts, + onFavoriteChange, + favorites, +}: { + rollouts: RolloutInfo[]; + onFavoriteChange: (rolloutName: string, isFavorite: boolean) => void; 
+ favorites: {[key: string]: boolean}; +}) => { + const tableRef = React.useRef(null); + + const handleFavoriteChange = (rolloutName: string, isFavorite: boolean) => { + onFavoriteChange(rolloutName, isFavorite); + }; + const data = rollouts + .map((rollout) => { + return { + ...rollout, + key: rollout.objectMeta?.uid, + favorite: favorites[rollout.objectMeta?.name] || false, + }; + }) + .sort((a, b) => { + if (a.favorite && !b.favorite) { + return -1; + } else if (!a.favorite && b.favorite) { + return 1; + } else { + return 0; + } + }); + + const columns = [ + { + dataIndex: 'favorite', + key: 'favorite', + render: (favorite: boolean, rollout: RolloutInfo) => { + return favorite ? ( + + ) : ( + + ); + }, + width: 50, + }, + { + title: 'Name', + dataIndex: 'objectMeta', + key: 'name', + width: 300, + render: (objectMeta: {name?: string}) => objectMeta.name, + sorter: (a: any, b: any) => a.objectMeta.name.localeCompare(b.objectMeta.name), + }, + { + title: 'Strategy', + dataIndex: 'strategy', + key: 'strategy', + align: 'left' as AlignType, + sorter: (a: any, b: any) => a.strategy.localeCompare(b.strategy), + render: (strategy: string) => { + return ( + + ); + }, + }, + { + title: 'Step', + dataIndex: 'step', + key: 'step', + render: (text: any, record: {step?: string}) => record.step || '-', + sorter: (a: any, b: any) => { + if (a.step === undefined) { + return -1; + } + if (b.step === undefined) { + return 1; + } else return a.step.localeCompare(b.step); + }, + }, + { + title: 'Weight', + dataIndex: 'setWeight', + key: 'weight', + render: (text: any, record: {setWeight?: number}) => record.setWeight || '-', + sorter: (a: any, b: any) => a.setWeight - b.setWeight, + }, + { + title: 'ReplicaSets', + key: 'replicasets', + width: 200, + sorter: (a: RolloutInfo, b: RolloutInfo) => a.desired - b.desired, + render: (rollout: RolloutInfo) => { + const stableReplicaSets = rollout.replicaSets?.filter((rs) => rs.stable); + const canaryReplicaSets = 
rollout.replicaSets?.filter((rs) => rs.canary); + const previewReplicaSets = rollout.replicaSets?.filter((rs) => rs.preview); + return ( +
    + {stableReplicaSets?.length > 0 && ( +
    + Stable:{' '} + {stableReplicaSets.map((rs) => ( + + + Rev {rs.revision} ({rs.available}/{rs.replicas}) + + + ))} +
    + )} + {canaryReplicaSets?.length > 0 && ( +
    + Canary:{' '} + {canaryReplicaSets.map((rs) => ( + + + Rev {rs.revision} ({rs.available}/{rs.replicas}) + + + ))} +
    + )} + {previewReplicaSets?.length > 0 && ( +
    + Preview:{' '} + {previewReplicaSets.map((rs) => ( + + + Rev {rs.revision} ({rs.available}/{rs.replicas}) + + + ))} +
    + )} +
    + ); + }, + }, + { + title: 'Status', + sorter: (a: any, b: any) => a.status.localeCompare(b.status), + render: (record: {message?: string; status?: string}) => { + return ( +
    + + {record.status} + +
    + ); + }, + }, + { + title: 'Actions', + dataIndex: 'actions', + key: 'actions', + render: (text: any, rollout: {objectMeta?: {name?: string}}) => { + return ( +
    +
    + {}} indicateLoading /> +
    +
    + {}} indicateLoading /> +
    +
    + {}} indicateLoading /> +
    +
    + {}} indicateLoading /> +
    +
    + ); + }, + }, + ]; + + const history = useHistory(); + const [selectedRow, setSelectedRow] = React.useState(undefined); + const {useKeybinding} = React.useContext(KeybindingContext); + useKeybinding(Key.UP, () => { + if (selectedRow === undefined) { + setSelectedRow(itemsPerPage - 1); + return true; + } else if (selectedRow > 0) { + setSelectedRow(selectedRow - 1); + return true; + } + return false; + }); + useKeybinding(Key.DOWN, () => { + if (selectedRow === undefined) { + setSelectedRow(0); + return true; + } else if (selectedRow < itemsPerPage - 1) { + setSelectedRow(selectedRow + 1); + return true; + } + return false; + }); + useKeybinding(Key.ENTER, () => { + if (selectedRow !== undefined) { + history.push(`/rollout/${data[selectedRow].objectMeta?.name}`); + return true; + } + return false; + }); + useKeybinding(Key.ESCAPE, () => { + setSelectedRow(undefined); + return false; // let the toolbar handle clearing the search bar + }); + + const [itemsPerPage, setItemsPerPage] = React.useState(10); + const handlePaginationChange = (pagination: TablePaginationConfig) => { + setItemsPerPage(pagination.pageSize); + }; + + return ( + ({ + className: selectedRow === index ? 
'rollouts-table__row__selected' : '', + onClick: () => { + history.push(`/rollout/${record.objectMeta?.name}`); + }, + style: {cursor: 'pointer'}, + })} + pagination={ + { + pageSize: itemsPerPage, + onChange: handlePaginationChange, + } as TablePaginationConfig + } + ref={tableRef} + rowClassName='rollouts-table__row' + rowKey={(_, index) => index} + style={{width: '100%', padding: '20px 20px'}} + /> + ); +}; diff --git a/ui/src/app/components/rollouts-toolbar/rollouts-toolbar.scss b/ui/src/app/components/rollouts-toolbar/rollouts-toolbar.scss new file mode 100644 index 0000000000..bc234b43c3 --- /dev/null +++ b/ui/src/app/components/rollouts-toolbar/rollouts-toolbar.scss @@ -0,0 +1,55 @@ +@import 'node_modules/argo-ui/v2/styles/colors'; + +.rollouts-toolbar { + display: flex; + justify-content: space-between; + align-items: center; + padding: 10px 10px; + border-bottom: 1px solid #fff; + background-color: #fff; +} + +.rollouts-toolbar_requires-attention-checkbox { + flex: 2; + padding-left: 20px; +} + +.rollouts-toolbar_search-container { + min-width: 300px; + padding-left: 20px; + padding-right: 20px; +} + +.rollouts-toolbar_display-modes { + margin-left: auto; +} + +.rollouts-toolbar_mode-button { + color: #989898; + border: none; + padding: 5px; + cursor: pointer; + transition: background-color 0.3s ease; + font-size: 24px; // increase the font-size to make the icon larger +} + +.rollouts-toolbar_mode-button:hover { + background-color: #b2b2b2; +} + +.rollouts-toolbar_mode-button.active { + color: #000000; +} + +.rollouts-toolbar_status-button { + cursor: pointer; + transition: background-color 0.3s ease; +} + +.rollouts-toolbar_status-filters { + display: flex; +} + +.rollouts-toolbar_status-buttons { + margin-left: 20px; +} \ No newline at end of file diff --git a/ui/src/app/components/rollouts-toolbar/rollouts-toolbar.tsx b/ui/src/app/components/rollouts-toolbar/rollouts-toolbar.tsx new file mode 100644 index 0000000000..6863258bc7 --- /dev/null +++ 
b/ui/src/app/components/rollouts-toolbar/rollouts-toolbar.tsx @@ -0,0 +1,239 @@ +import * as React from 'react'; + +import {Key, KeybindingContext} from 'react-keyhooks'; +import {useHistory, useLocation} from 'react-router-dom'; + +import {AutoComplete} from 'antd'; +import {Tooltip} from 'antd'; + +import {FontAwesomeIcon} from '@fortawesome/react-fontawesome'; +import {faTableList, faTableCellsLarge} from '@fortawesome/free-solid-svg-icons'; + +import {RolloutInfo} from '../../../models/rollout/rollout'; +import {StatusCount} from '../status-count/status-count'; +import './rollouts-toolbar.scss'; + +export type Filters = { + showRequiresAttention: boolean; + showFavorites?: boolean; + name: string; + displayMode?: string; + status: { + [key: string]: boolean; + }; +}; + +interface StatusCount { + [key: string]: number; +} + +export const defaultDisplayMode = 'grid'; + +export const RolloutsToolbar = ({ + rollouts, + favorites, + onFilterChange, +}: { + rollouts: RolloutInfo[]; + favorites: {[key: string]: boolean}; + onFilterChange: (filters: Filters) => void; +}) => { + const history = useHistory(); + const location = useLocation(); + const searchParams = new URLSearchParams(window.location.search); + const [filters, setFilters] = React.useState({ + showRequiresAttention: searchParams.get('showRequiresAttention') === 'true', + showFavorites: searchParams.get('showFavorites') === 'true', + name: searchParams.get('name') || '', + displayMode: searchParams.get('displayMode') || defaultDisplayMode, + status: { + Progressing: searchParams.get('Progressing') === 'true', + Degraded: searchParams.get('Degraded') === 'true', + Paused: searchParams.get('Paused') === 'true', + Healthy: searchParams.get('Healthy') === 'true', + }, + }); + + // Ensure that the filters are updated when the URL changes + React.useEffect(() => { + onFilterChange(filters); + }, [filters]); + + const handleFilterChange = (newFilters: Filters) => { + setFilters(newFilters); + 
onFilterChange(newFilters); + }; + + const handleNameFilterChange = (value: string) => { + const newFilters = { + ...filters, + name: value, + }; + const searchParams = new URLSearchParams(location.search); + if (value) { + searchParams.set('name', value); + } else { + searchParams.delete('name'); + } + history.push({search: searchParams.toString()}); + handleFilterChange(newFilters); + }; + + const handleShowRequiresAttentionChange = (event: React.MouseEvent) => { + const newFilters = { + ...filters, + showRequiresAttention: !filters.showRequiresAttention, + }; + const searchParams = new URLSearchParams(location.search); + if (!filters.showRequiresAttention) { + searchParams.set('showRequiresAttention', 'true'); + } else { + searchParams.delete('showRequiresAttention'); + } + history.push({search: searchParams.toString()}); + handleFilterChange(newFilters); + }; + + const handleShowFavoritesChange = (event: React.MouseEvent) => { + const newFilters = { + ...filters, + showFavorites: !filters.showFavorites, + }; + const searchParams = new URLSearchParams(location.search); + if (!filters.showFavorites) { + searchParams.set('showFavorites', 'true'); + } else { + searchParams.delete('showFavorites'); + } + history.push({search: searchParams.toString()}); + handleFilterChange(newFilters); + }; + + const handleDisplayModeChange = (event: React.MouseEvent) => { + const newFilters = { + ...filters, + displayMode: event.currentTarget.id, + }; + const searchParams = new URLSearchParams(location.search); + if (event.currentTarget.id !== defaultDisplayMode) { + searchParams.set('displayMode', event.currentTarget.id); + } else { + searchParams.delete('displayMode'); + searchParams.delete('displaymode'); + } + history.push({search: searchParams.toString()}); + handleFilterChange(newFilters); + }; + + const handleStatusFilterChange = (event: React.MouseEvent) => { + const searchParams = new URLSearchParams(location.search); + + if (!filters.status[event.currentTarget.id]) { + 
searchParams.set(event.currentTarget.id, 'true'); + } else { + searchParams.delete(event.currentTarget.id); + } + history.push({search: searchParams.toString()}); + + const newFilters = { + ...filters, + status: { + ...filters.status, + [event.currentTarget.id]: !filters.status[event.currentTarget.id], + }, + }; + handleFilterChange(newFilters); + }; + + const statusCounts: StatusCount = React.useMemo(() => { + const counts: StatusCount = { + Progressing: 0, + Degraded: 0, + Paused: 0, + Healthy: 0, + }; + rollouts.forEach((r) => { + counts[r.status]++; + }); + + return counts; + }, [rollouts]); + + const needsAttentionCount: number = React.useMemo(() => { + const pausedRollouts = rollouts.filter((r) => r.status === 'Paused' && r.message !== 'CanaryPauseStep'); + const degradedRollouts = rollouts.filter((r) => r.status === 'Degraded'); + const unknownRollouts = rollouts.filter((r) => r.status === 'Unknown'); + return pausedRollouts.length + degradedRollouts.length + unknownRollouts.length; + }, [rollouts, statusCounts]); + + const favoriteCount: number = React.useMemo(() => { + return rollouts.filter((r) => favorites[r.objectMeta.name]).length; + }, [rollouts, favorites]); + + const searchRef = React.useRef(null); + const {useKeybinding} = React.useContext(KeybindingContext); + useKeybinding(Key.SLASH, () => { + if (searchRef) { + searchRef.current.focus(); + return true; + } + return false; + }); + + return ( +
    +
    + + + + + + +
    + {Object.keys(statusCounts).map((status: string) => { + return ( + + + + ); + })} +
    +
    +
    + + + + + + +
    + +
    + { + event.stopPropagation(); // Prevents shift+H from opening the help menu + }} + ref={searchRef} + /> +
    +
    +
    + ); +}; diff --git a/ui/src/app/components/status-count/status-count.scss b/ui/src/app/components/status-count/status-count.scss new file mode 100644 index 0000000000..9dec534d27 --- /dev/null +++ b/ui/src/app/components/status-count/status-count.scss @@ -0,0 +1,31 @@ +@import 'node_modules/argo-ui/v2/styles/colors'; + +.status-count { + display: flex; + align-items: center; + border: 1px solid $argo-color-gray-4; + border-radius: 5px; + padding: 2px; + margin: 1px; + + &__icon { + font-size: 15px; + color: $argo-color-gray-8; + margin: 5px; + + text-align: center; + flex: 0 0 auto; + } + + &__count { + font-size: 15px; + font-weight: 500; + color: $argo-color-gray-8; + margin-right: 5px; + text-align: right; + flex: 1; + } +} +.status-count.active { + background-color: $argo-color-teal-2; +} diff --git a/ui/src/app/components/status-count/status-count.tsx b/ui/src/app/components/status-count/status-count.tsx new file mode 100644 index 0000000000..83cea4f5ac --- /dev/null +++ b/ui/src/app/components/status-count/status-count.tsx @@ -0,0 +1,16 @@ +import * as React from 'react'; + +import {RolloutStatus, StatusIcon} from '../status-icon/status-icon'; + +import './status-count.scss'; + +export const StatusCount = ({status, count, defaultIcon = 'fa-exclamation-circle', active = false}: {status: String; count: Number; defaultIcon?: String; active?: boolean}) => { + return ( +
    +
    + +
    +
    {count}
    +
    + ); +}; diff --git a/ui/src/app/components/status-icon/status-icon.tsx b/ui/src/app/components/status-icon/status-icon.tsx index 257dc50567..5da619a357 100644 --- a/ui/src/app/components/status-icon/status-icon.tsx +++ b/ui/src/app/components/status-icon/status-icon.tsx @@ -9,9 +9,11 @@ export enum RolloutStatus { Healthy = 'Healthy', } -export const StatusIcon = (props: {status: RolloutStatus}): JSX.Element => { +export const StatusIcon = (props: {status: RolloutStatus; showTooltip?: boolean; defaultIcon?: String}): JSX.Element => { let icon, className; let spin = false; + const showTooltip = props.showTooltip ?? true; + const defaultIcon = props.defaultIcon ?? 'fa-question-circle'; const {status} = props; switch (status) { case 'Progressing': { @@ -36,14 +38,19 @@ export const StatusIcon = (props: {status: RolloutStatus}): JSX.Element => { break; } default: { - icon = 'fa-question-circle'; + icon = defaultIcon; className = 'unknown'; } } return ( - - - + + {showTooltip && ( + + + + )} + {!showTooltip && } + ); }; @@ -55,9 +62,11 @@ export enum ReplicaSetStatus { Progressing = 'Progressing', } -export const ReplicaSetStatusIcon = (props: {status: ReplicaSetStatus}) => { +export const ReplicaSetStatusIcon = (props: {status: ReplicaSetStatus; showTooltip?: boolean; defaultIcon?: String}) => { let icon, className; let spin = false; + const showTooltip = props.showTooltip ?? true; + const defaultIcon = props.defaultIcon ?? 
'fa-question-circle'; const {status} = props; switch (status) { case 'Healthy': @@ -83,13 +92,18 @@ export const ReplicaSetStatusIcon = (props: {status: ReplicaSetStatus}) => { break; } default: { - icon = 'fa-question-circle'; + icon = defaultIcon; className = 'unknown'; } } return ( - - - + + {showTooltip && ( + + + + )} + {!showTooltip && } + ); }; diff --git a/ui/src/app/components/ticker/ticker.tsx b/ui/src/app/components/ticker/ticker.tsx new file mode 100644 index 0000000000..6b07fc9cca --- /dev/null +++ b/ui/src/app/components/ticker/ticker.tsx @@ -0,0 +1,40 @@ +import * as moment from 'moment'; +import * as React from 'react'; +import {interval, Subscription} from 'rxjs'; + +export class Ticker extends React.Component<{intervalMs?: number; disabled?: boolean; children?: (time: moment.Moment) => React.ReactNode}, {time: moment.Moment}> { + private subscription: Subscription | null = null; + + constructor(props: {intervalMs?: number; children?: (time: moment.Moment) => React.ReactNode}) { + super(props); + this.state = {time: moment()}; + this.ensureSubscribed(); + } + + public render() { + return this.props.children && this.props.children(this.state.time); + } + + public componentDidUpdate() { + this.ensureSubscribed(); + } + + public componentWillUnmount() { + this.ensureUnsubscribed(); + } + + private ensureSubscribed() { + if (this.props.disabled) { + this.ensureUnsubscribed(); + } else if (!this.subscription) { + this.subscription = interval(this.props.intervalMs || 1000).subscribe(() => this.setState({time: moment()})); + } + } + + private ensureUnsubscribed() { + if (this.subscription != null) { + this.subscription.unsubscribe(); + this.subscription = null; + } + } +} diff --git a/ui/src/app/index.tsx b/ui/src/app/index.tsx index 3bc5d04223..56521a1185 100644 --- a/ui/src/app/index.tsx +++ b/ui/src/app/index.tsx @@ -6,5 +6,5 @@ ReactDOM.render( , - document.getElementById('root') + document.getElementById('root'), ); diff --git 
a/ui/src/app/webpack.common.js b/ui/src/app/webpack.common.js index 0907062747..72d6d908ad 100644 --- a/ui/src/app/webpack.common.js +++ b/ui/src/app/webpack.common.js @@ -1,5 +1,9 @@ 'use strict;'; +const crypto = require("crypto"); +const crypto_orig_createHash = crypto.createHash; +crypto.createHash = algorithm => crypto_orig_createHash(algorithm == "md4" ? "sha256" : algorithm); + const CopyWebpackPlugin = require('copy-webpack-plugin'); const HtmlWebpackPlugin = require('html-webpack-plugin'); @@ -36,6 +40,12 @@ const config = { test: /\.css$/, loader: 'style-loader!raw-loader', }, + // https://github.com/fkhadra/react-toastify/issues/775#issuecomment-1149569290 + { + test: /\.mjs$/, + include: /node_modules/, + type: "javascript/auto" + }, ], }, node: { diff --git a/ui/src/config/theme.ts b/ui/src/config/theme.ts index c377896a2e..ef2390a25c 100644 --- a/ui/src/config/theme.ts +++ b/ui/src/config/theme.ts @@ -1,15 +1,18 @@ -import { ThemeConfig } from 'antd/es/config-provider'; +import {ThemeConfig} from 'antd/es/config-provider'; export const theme: ThemeConfig = { - components: { - Button: { - colorPrimary: '#44505f', - colorPrimaryBgHover: '#626f7e', - colorPrimaryHover: '#626f7e', - colorPrimaryActive: '#626f7e', - borderRadius: 100, - borderRadiusSM: 100, - borderRadiusLG: 100 - } - } + components: { + Button: { + colorPrimary: '#44505f', + colorPrimaryBgHover: '#626f7e', + colorPrimaryHover: '#626f7e', + colorPrimaryActive: '#626f7e', + borderRadius: 100, + borderRadiusSM: 100, + borderRadiusLG: 100, + }, + }, + token: { + colorPrimary: '#44505f', + }, }; diff --git a/ui/src/models/rollout/generated/api.ts b/ui/src/models/rollout/generated/api.ts index fe2a8eca01..114f705aee 100755 --- a/ui/src/models/rollout/generated/api.ts +++ b/ui/src/models/rollout/generated/api.ts @@ -244,6 +244,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRun * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunSpec */ 
measurementRetention?: Array; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunSpec + */ + ttlStrategy?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy; } /** * @@ -287,6 +293,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRun * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStatus */ dryRunSummary?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RunSummary; + /** + * + * @type {K8sIoApimachineryPkgApisMetaV1Time} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunStatus + */ + completedAt?: K8sIoApimachineryPkgApisMetaV1Time; } /** * @@ -307,6 +319,25 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRun */ unsuccessfulRunHistoryLimit?: number; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisTemplateRef + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisTemplateRef { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisTemplateRef + */ + templateName?: string; + /** + * + * @type {boolean} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisTemplateRef + */ + clusterScope?: boolean; +} /** * * @export @@ -472,6 +503,25 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ArgumentVal */ fieldRef?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1FieldRef; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication { + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Sigv4Config} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication + */ + sigv4?: 
GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Sigv4Config; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication + */ + oauth2?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config; +} /** * * @export @@ -958,7 +1008,7 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1CloudWatchM */ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric { /** - * + * +kubebuilder:default=\"5m\" Interval refers to the Interval time window in Datadog (default: 5m). Not to be confused with the polling rate for the metric. * @type {string} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric */ @@ -970,11 +1020,29 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetr */ query?: string; /** - * ApiVersion refers to the Datadog API version being used (default: v1). v1 will eventually be deprecated. + * + * @type {{ [key: string]: string; }} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric + */ + queries?: { [key: string]: string; }; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric + */ + formula?: string; + /** + * * @type {string} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric */ apiVersion?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1DatadogMetric + */ + aggregator?: string; } /** * DryRun defines the settings for running the analysis in Dry-Run mode. 
@@ -1614,6 +1682,37 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1NginxTraffi */ stableIngresses?: Array; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config { + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config + */ + tokenUrl?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config + */ + clientId?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config + */ + clientSecret?: string; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1OAuth2Config + */ + scopes?: Array; +} /** * * @export @@ -1638,6 +1737,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ObjectRef { * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ObjectRef */ name?: string; + /** + * + * @type {string} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1ObjectRef + */ + scaleDown?: string; } /** * @@ -1709,19 +1814,6 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PreferredDu */ weight?: number; } -/** - * - * @export - * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusAuth - */ -export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusAuth { - /** - * - * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Sigv4Config} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusAuth - */ - sigv4?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Sigv4Config; -} /** * * @export @@ -1742,10 +1834,10 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusM query?: string; /** * - * @type 
{GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusAuth} + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusMetric */ - authentication?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1PrometheusAuth; + authentication?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication; /** * * @type {string} @@ -1818,10 +1910,10 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Rollout { export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysis { /** * - * @type {Array} + * @type {Array} * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysis */ - templates?: Array; + templates?: Array; /** * * @type {Array} @@ -1891,25 +1983,6 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnal */ message?: string; } -/** - * - * @export - * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysisTemplate - */ -export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysisTemplate { - /** - * - * @type {string} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysisTemplate - */ - templateName?: string; - /** - * - * @type {boolean} - * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutAnalysisTemplate - */ - clusterScope?: boolean; -} /** * RolloutCondition describes the state of a rollout at a certain point. 
* @export @@ -1977,6 +2050,18 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExpe * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentStep */ analyses?: Array; + /** + * + * @type {Array} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentStep + */ + dryRun?: Array; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunMetadata} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutExperimentStep + */ + analysisRunMetadata?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1AnalysisRunMetadata; } /** * @@ -2410,6 +2495,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTraf * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting */ plugins?: { [key: string]: string; }; + /** + * + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1RolloutTrafficRouting + */ + maxTrafficWeight?: number; } /** * @@ -2743,6 +2834,31 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TLSRoute { */ sniHosts?: Array; } +/** + * + * @export + * @interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy + */ +export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy { + /** + * SecondsAfterCompletion is the number of seconds to live after completion. + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy + */ + secondsAfterCompletion?: number; + /** + * SecondsAfterFailure is the number of seconds to live after failure. + * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy + */ + secondsAfterFailure?: number; + /** + * SecondsAfterSuccess is the number of seconds to live after success. 
+ * @type {number} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1TTLStrategy + */ + secondsAfterSuccess?: number; +} /** * * @export @@ -2892,6 +3008,12 @@ export interface GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WebMetric { * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WebMetric */ jsonBody?: string; + /** + * + * @type {GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication} + * @memberof GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1WebMetric + */ + authentication?: GithubComArgoprojArgoRolloutsPkgApisRolloutsV1alpha1Authentication; } /** * @@ -3060,6 +3182,18 @@ export interface K8sIoApiBatchV1JobSpec { * @memberof K8sIoApiBatchV1JobSpec */ backoffLimit?: number; + /** + * + * @type {number} + * @memberof K8sIoApiBatchV1JobSpec + */ + backoffLimitPerIndex?: number; + /** + * + * @type {number} + * @memberof K8sIoApiBatchV1JobSpec + */ + maxFailedIndexes?: number; /** * * @type {K8sIoApimachineryPkgApisMetaV1LabelSelector} @@ -3085,17 +3219,23 @@ export interface K8sIoApiBatchV1JobSpec { */ ttlSecondsAfterFinished?: number; /** - * CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. More completion modes can be added in the future. 
If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job. +optional + * completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job. +optional * @type {string} * @memberof K8sIoApiBatchV1JobSpec */ completionMode?: string; /** - * Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false. +optional + * suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. 
If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false. +optional * @type {boolean} * @memberof K8sIoApiBatchV1JobSpec */ suspend?: boolean; + /** + * podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods when they are terminating (has a metadata.deletionTimestamp) or failed. - Failed means to wait until a previously created Pod is fully terminated (has phase Failed or Succeeded) before creating a replacement Pod. When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default. +optional + * @type {string} + * @memberof K8sIoApiBatchV1JobSpec + */ + podReplacementPolicy?: string; } /** * PodFailurePolicy describes how failed pods influence the backoffLimit. @@ -3123,7 +3263,7 @@ export interface K8sIoApiBatchV1PodFailurePolicyOnExitCodesRequirement { */ containerName?: string; /** - * Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: - In: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is in the set of specified values. - NotIn: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is not in the set of specified values. 
Additional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied. + * - In: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is in the set of specified values. - NotIn: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is not in the set of specified values. Additional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied. * @type {string} * @memberof K8sIoApiBatchV1PodFailurePolicyOnExitCodesRequirement */ @@ -3155,13 +3295,13 @@ export interface K8sIoApiBatchV1PodFailurePolicyOnPodConditionsPattern { status?: string; } /** - * PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of OnExitCodes and onPodConditions, but not both, can be used in each rule. + * PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule. * @export * @interface K8sIoApiBatchV1PodFailurePolicyRule */ export interface K8sIoApiBatchV1PodFailurePolicyRule { /** - * Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. 
+ * - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. This value is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. * @type {string} * @memberof K8sIoApiBatchV1PodFailurePolicyRule */ @@ -3433,6 +3573,62 @@ export interface K8sIoApiCoreV1CinderVolumeSource { */ secretRef?: K8sIoApiCoreV1LocalObjectReference; } +/** + * ClaimSource describes a reference to a ResourceClaim. Exactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value. + * @export + * @interface K8sIoApiCoreV1ClaimSource + */ +export interface K8sIoApiCoreV1ClaimSource { + /** + * ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + * @type {string} + * @memberof K8sIoApiCoreV1ClaimSource + */ + resourceClaimName?: string; + /** + * ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. 
+ * @type {string} + * @memberof K8sIoApiCoreV1ClaimSource + */ + resourceClaimTemplateName?: string; +} +/** + * ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem. + * @export + * @interface K8sIoApiCoreV1ClusterTrustBundleProjection + */ +export interface K8sIoApiCoreV1ClusterTrustBundleProjection { + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1ClusterTrustBundleProjection + */ + name?: string; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1ClusterTrustBundleProjection + */ + signerName?: string; + /** + * + * @type {K8sIoApimachineryPkgApisMetaV1LabelSelector} + * @memberof K8sIoApiCoreV1ClusterTrustBundleProjection + */ + labelSelector?: K8sIoApimachineryPkgApisMetaV1LabelSelector; + /** + * + * @type {boolean} + * @memberof K8sIoApiCoreV1ClusterTrustBundleProjection + */ + optional?: boolean; + /** + * Relative path from the volume root to write the bundle. + * @type {string} + * @memberof K8sIoApiCoreV1ClusterTrustBundleProjection + */ + path?: string; +} /** * ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables. * @export @@ -3593,6 +3789,18 @@ export interface K8sIoApiCoreV1Container { * @memberof K8sIoApiCoreV1Container */ resources?: K8sIoApiCoreV1ResourceRequirements; + /** + * + * @type {Array} + * @memberof K8sIoApiCoreV1Container + */ + resizePolicy?: Array; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1Container + */ + restartPolicy?: string; /** * * @type {Array} @@ -3709,6 +3917,25 @@ export interface K8sIoApiCoreV1ContainerPort { */ hostIP?: string; } +/** + * ContainerResizePolicy represents resource resize policy for the container. 
+ * @export + * @interface K8sIoApiCoreV1ContainerResizePolicy + */ +export interface K8sIoApiCoreV1ContainerResizePolicy { + /** + * Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + * @type {string} + * @memberof K8sIoApiCoreV1ContainerResizePolicy + */ + resourceName?: string; + /** + * Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + * @type {string} + * @memberof K8sIoApiCoreV1ContainerResizePolicy + */ + restartPolicy?: string; +} /** * Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode. * @export @@ -3951,6 +4178,18 @@ export interface K8sIoApiCoreV1EphemeralContainerCommon { * @memberof K8sIoApiCoreV1EphemeralContainerCommon */ resources?: K8sIoApiCoreV1ResourceRequirements; + /** + * + * @type {Array} + * @memberof K8sIoApiCoreV1EphemeralContainerCommon + */ + resizePolicy?: Array; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1EphemeralContainerCommon + */ + restartPolicy?: string; /** * * @type {Array} @@ -4293,7 +4532,7 @@ export interface K8sIoApiCoreV1HTTPGetAction { */ export interface K8sIoApiCoreV1HTTPHeader { /** - * + * The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. 
* @type {string} * @memberof K8sIoApiCoreV1HTTPHeader */ @@ -4484,6 +4723,12 @@ export interface K8sIoApiCoreV1LifecycleHandler { * @memberof K8sIoApiCoreV1LifecycleHandler */ tcpSocket?: K8sIoApiCoreV1TCPSocketAction; + /** + * + * @type {K8sIoApiCoreV1SleepAction} + * @memberof K8sIoApiCoreV1LifecycleHandler + */ + sleep?: K8sIoApiCoreV1SleepAction; } /** * @@ -4638,10 +4883,10 @@ export interface K8sIoApiCoreV1PersistentVolumeClaimSpec { selector?: K8sIoApimachineryPkgApisMetaV1LabelSelector; /** * - * @type {K8sIoApiCoreV1ResourceRequirements} + * @type {K8sIoApiCoreV1VolumeResourceRequirements} * @memberof K8sIoApiCoreV1PersistentVolumeClaimSpec */ - resources?: K8sIoApiCoreV1ResourceRequirements; + resources?: K8sIoApiCoreV1VolumeResourceRequirements; /** * * @type {string} @@ -4668,10 +4913,16 @@ export interface K8sIoApiCoreV1PersistentVolumeClaimSpec { dataSource?: K8sIoApiCoreV1TypedLocalObjectReference; /** * - * @type {K8sIoApiCoreV1TypedLocalObjectReference} + * @type {K8sIoApiCoreV1TypedObjectReference} * @memberof K8sIoApiCoreV1PersistentVolumeClaimSpec */ - dataSourceRef?: K8sIoApiCoreV1TypedLocalObjectReference; + dataSourceRef?: K8sIoApiCoreV1TypedObjectReference; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1PersistentVolumeClaimSpec + */ + volumeAttributesClassName?: string; } /** * PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource. @@ -4779,6 +5030,18 @@ export interface K8sIoApiCoreV1PodAffinityTerm { * @memberof K8sIoApiCoreV1PodAffinityTerm */ namespaceSelector?: K8sIoApimachineryPkgApisMetaV1LabelSelector; + /** + * + * @type {Array} + * @memberof K8sIoApiCoreV1PodAffinityTerm + */ + matchLabelKeys?: Array; + /** + * + * @type {Array} + * @memberof K8sIoApiCoreV1PodAffinityTerm + */ + mismatchLabelKeys?: Array; } /** * Pod anti affinity is a group of inter pod anti affinity scheduling rules. 
@@ -4869,6 +5132,38 @@ export interface K8sIoApiCoreV1PodReadinessGate { */ conditionType?: string; } +/** + * PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. + * @export + * @interface K8sIoApiCoreV1PodResourceClaim + */ +export interface K8sIoApiCoreV1PodResourceClaim { + /** + * Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + * @type {string} + * @memberof K8sIoApiCoreV1PodResourceClaim + */ + name?: string; + /** + * + * @type {K8sIoApiCoreV1ClaimSource} + * @memberof K8sIoApiCoreV1PodResourceClaim + */ + source?: K8sIoApiCoreV1ClaimSource; +} +/** + * PodSchedulingGate is associated to a Pod to guard its scheduling. + * @export + * @interface K8sIoApiCoreV1PodSchedulingGate + */ +export interface K8sIoApiCoreV1PodSchedulingGate { + /** + * Name of the scheduling gate. Each scheduling gate must have a unique name field. + * @type {string} + * @memberof K8sIoApiCoreV1PodSchedulingGate + */ + name?: string; +} /** * PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext. * @export @@ -5164,6 +5459,18 @@ export interface K8sIoApiCoreV1PodSpec { * @memberof K8sIoApiCoreV1PodSpec */ hostUsers?: boolean; + /** + * SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. SchedulingGates can only be set at pod creation time, and be removed only afterwards. This is a beta feature enabled by the PodSchedulingReadiness feature gate. 
+patchMergeKey=name +patchStrategy=merge +listType=map +listMapKey=name +featureGate=PodSchedulingReadiness +optional + * @type {Array} + * @memberof K8sIoApiCoreV1PodSpec + */ + schedulingGates?: Array; + /** + * ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. +patchMergeKey=name +patchStrategy=merge,retainKeys +listType=map +listMapKey=name +featureGate=DynamicResourceAllocation +optional + * @type {Array} + * @memberof K8sIoApiCoreV1PodSpec + */ + resourceClaims?: Array; } /** * @@ -5425,6 +5732,19 @@ export interface K8sIoApiCoreV1RBDVolumeSource { */ readOnly?: boolean; } +/** + * ResourceClaim references one entry in PodSpec.ResourceClaims. + * @export + * @interface K8sIoApiCoreV1ResourceClaim + */ +export interface K8sIoApiCoreV1ResourceClaim { + /** + * Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + * @type {string} + * @memberof K8sIoApiCoreV1ResourceClaim + */ + name?: string; +} /** * * @export @@ -5468,6 +5788,12 @@ export interface K8sIoApiCoreV1ResourceRequirements { * @memberof K8sIoApiCoreV1ResourceRequirements */ requests?: { [key: string]: K8sIoApimachineryPkgApiResourceQuantity; }; + /** + * Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
+listType=map +listMapKey=name +featureGate=DynamicResourceAllocation +optional + * @type {Array} + * @memberof K8sIoApiCoreV1ResourceRequirements + */ + claims?: Array; } /** * @@ -5784,6 +6110,19 @@ export interface K8sIoApiCoreV1ServiceAccountTokenProjection { */ path?: string; } +/** + * SleepAction describes a \"sleep\" action. + * @export + * @interface K8sIoApiCoreV1SleepAction + */ +export interface K8sIoApiCoreV1SleepAction { + /** + * Seconds is the number of seconds to sleep. + * @type {string} + * @memberof K8sIoApiCoreV1SleepAction + */ + seconds?: string; +} /** * Represents a StorageOS persistent volume resource. * @export @@ -5933,19 +6272,19 @@ export interface K8sIoApiCoreV1TopologySpreadConstraint { */ minDomains?: number; /** - * NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. +optional + * NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. +optional * @type {string} * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ nodeAffinityPolicy?: string; /** - * NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. 
Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. +optional + * NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. +optional * @type {string} * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ nodeTaintsPolicy?: string; /** - * + * MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). 
+listType=atomic +optional * @type {Array} * @memberof K8sIoApiCoreV1TopologySpreadConstraint */ @@ -5976,6 +6315,37 @@ export interface K8sIoApiCoreV1TypedLocalObjectReference { */ name?: string; } +/** + * + * @export + * @interface K8sIoApiCoreV1TypedObjectReference + */ +export interface K8sIoApiCoreV1TypedObjectReference { + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1TypedObjectReference + */ + apiGroup?: string; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1TypedObjectReference + */ + kind?: string; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1TypedObjectReference + */ + name?: string; + /** + * + * @type {string} + * @memberof K8sIoApiCoreV1TypedObjectReference + */ + namespace?: string; +} /** * Volume represents a named volume in a pod that may be accessed by any container in the pod. * @export @@ -6087,6 +6457,31 @@ export interface K8sIoApiCoreV1VolumeProjection { * @memberof K8sIoApiCoreV1VolumeProjection */ serviceAccountToken?: K8sIoApiCoreV1ServiceAccountTokenProjection; + /** + * + * @type {K8sIoApiCoreV1ClusterTrustBundleProjection} + * @memberof K8sIoApiCoreV1VolumeProjection + */ + clusterTrustBundle?: K8sIoApiCoreV1ClusterTrustBundleProjection; +} +/** + * VolumeResourceRequirements describes the storage resource requirements for a volume. + * @export + * @interface K8sIoApiCoreV1VolumeResourceRequirements + */ +export interface K8sIoApiCoreV1VolumeResourceRequirements { + /** + * + * @type {{ [key: string]: K8sIoApimachineryPkgApiResourceQuantity; }} + * @memberof K8sIoApiCoreV1VolumeResourceRequirements + */ + limits?: { [key: string]: K8sIoApimachineryPkgApiResourceQuantity; }; + /** + * + * @type {{ [key: string]: K8sIoApimachineryPkgApiResourceQuantity; }} + * @memberof K8sIoApiCoreV1VolumeResourceRequirements + */ + requests?: { [key: string]: K8sIoApimachineryPkgApiResourceQuantity; }; } /** * Represents the source of a volume to mount. Only one of its members may be specified. 
@@ -6402,7 +6797,7 @@ export interface K8sIoApimachineryPkgApisMetaV1LabelSelector { */ export interface K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement { /** - * + * key is the label key that the selector applies to. * @type {string} * @memberof K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement */ @@ -6488,7 +6883,7 @@ export interface K8sIoApimachineryPkgApisMetaV1ObjectMeta { */ generateName?: string; /** - * Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces +optional + * Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces +optional * @type {string} * @memberof K8sIoApimachineryPkgApisMetaV1ObjectMeta */ @@ -6500,7 +6895,7 @@ export interface K8sIoApimachineryPkgApisMetaV1ObjectMeta { */ selfLink?: string; /** - * UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids +optional + * UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids +optional * @type {string} * @memberof K8sIoApimachineryPkgApisMetaV1ObjectMeta */ @@ -7126,6 +7521,12 @@ export interface RolloutReplicaSetInfo { * @memberof RolloutReplicaSetInfo */ pong?: boolean; + /** + * + * @type {Array} + * @memberof RolloutReplicaSetInfo + */ + initContainerImages?: Array; } /** * @@ -7291,6 +7692,12 @@ export interface RolloutRolloutInfo { * @memberof RolloutRolloutInfo */ steps?: Array; + /** + * + * @type {Array} + * @memberof RolloutRolloutInfo + */ + initContainers?: Array; } /** * diff --git a/ui/tsconfig.json b/ui/tsconfig.json index 4a31dd453b..33aaf1dfd9 100644 --- a/ui/tsconfig.json +++ b/ui/tsconfig.json @@ -2,14 +2,16 @@ "compilerOptions": { "sourceMap": true, "noImplicitAny": true, - "module": "commonjs", + "module": "CommonJS", "target": "es6", "jsx": "react", "experimentalDecorators": true, "noUnusedLocals": true, "declaration": false, - "lib": ["es2017", "dom"] + "lib": ["es2017", "dom"], + "allowSyntheticDefaultImports": true }, "include": ["./**/*"], - "exclude": ["node_modules", "./**/*.test.ts", "./**/*.test.tsx"] + "exclude": ["node_modules", "./**/*.test.tsx"], + "types": ["node", "jest"] } diff --git a/ui/yarn.lock b/ui/yarn.lock index 447790e646..c8fbf6b82b 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -2,6 +2,14 @@ # yarn lockfile v1 +"@ampproject/remapping@^2.2.0": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630" + integrity sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg== + dependencies: + "@jridgewell/gen-mapping" "^0.3.0" + "@jridgewell/trace-mapping" "^0.3.9" + "@ant-design/colors@^7.0.0": version "7.0.0" resolved "https://registry.yarnpkg.com/@ant-design/colors/-/colors-7.0.0.tgz#eb7eecead124c3533aea05d61254f0a17f2b61b3" @@ -9,6 +17,19 @@ dependencies: "@ctrl/tinycolor" "^3.4.0" 
+"@ant-design/cssinjs@^1.17.5": + version "1.18.0" + resolved "https://registry.yarnpkg.com/@ant-design/cssinjs/-/cssinjs-1.18.0.tgz#92701684cab5fc67bb62bc813ec1b4a33258018d" + integrity sha512-NXzfnNjJgpn+L6d0cD2cS14Tsqs46Bsua6PwVMlmN+F0OEoa9PhJRwUWmI+HyIrc4cgVZVfQTDpXC0p07Jmglw== + dependencies: + "@babel/runtime" "^7.11.1" + "@emotion/hash" "^0.8.0" + "@emotion/unitless" "^0.7.5" + classnames "^2.3.1" + csstype "^3.0.10" + rc-util "^5.35.0" + stylis "^4.0.13" + "@ant-design/cssinjs@^1.7.1": version "1.8.1" resolved "https://registry.yarnpkg.com/@ant-design/cssinjs/-/cssinjs-1.8.1.tgz#326682e779f5cd074668391a6698b50342a07d92" @@ -27,6 +48,11 @@ resolved "https://registry.yarnpkg.com/@ant-design/icons-svg/-/icons-svg-4.2.1.tgz#8630da8eb4471a4aabdaed7d1ff6a97dcb2cf05a" integrity sha512-EB0iwlKDGpG93hW8f85CTJTs4SvMX7tt5ceupvhALp1IF44SeUFOMhKUOYqpsoYWQKAOuTRDMqn75rEaKDp0Xw== +"@ant-design/icons-svg@^4.3.0": + version "4.3.1" + resolved "https://registry.yarnpkg.com/@ant-design/icons-svg/-/icons-svg-4.3.1.tgz#4b2f65a17d4d32b526baa6414aca2117382bf8da" + integrity sha512-4QBZg8ccyC6LPIRii7A0bZUk3+lEDCLnhB+FVsflGdcWPPmV+j3fire4AwwoqHV/BibgvBmR9ZIo4s867smv+g== + "@ant-design/icons@^5.0.0": version "5.0.1" resolved "https://registry.yarnpkg.com/@ant-design/icons/-/icons-5.0.1.tgz#febb1fdc5776f58187b2c953ac9a4496069d045b" @@ -38,6 +64,17 @@ classnames "^2.2.6" rc-util "^5.9.4" +"@ant-design/icons@^5.2.6": + version "5.2.6" + resolved "https://registry.yarnpkg.com/@ant-design/icons/-/icons-5.2.6.tgz#2d4a9a37f531eb2a20cebec01d6fb69cf593900d" + integrity sha512-4wn0WShF43TrggskBJPRqCD0fcHbzTYjnaoskdiJrVHg86yxoZ8ZUqsXvyn4WUqehRiFKnaclOhqk9w4Ui2KVw== + dependencies: + "@ant-design/colors" "^7.0.0" + "@ant-design/icons-svg" "^4.3.0" + "@babel/runtime" "^7.11.2" + classnames "^2.2.6" + rc-util "^5.31.1" + "@ant-design/react-slick@~1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@ant-design/react-slick/-/react-slick-1.0.0.tgz#4696eecaa2dea0429e47ae24c267015cfd6df35c" 
@@ -49,6 +86,17 @@ resize-observer-polyfill "^1.5.1" throttle-debounce "^5.0.0" +"@ant-design/react-slick@~1.0.2": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@ant-design/react-slick/-/react-slick-1.0.2.tgz#241bb412aeacf7ff5d50c61fa5db66773fde6b56" + integrity sha512-Wj8onxL/T8KQLFFiCA4t8eIRGpRR+UPgOdac2sYzonv+i0n3kXHmvHLLiOYL655DQx2Umii9Y9nNgL7ssu5haQ== + dependencies: + "@babel/runtime" "^7.10.4" + classnames "^2.2.5" + json2mq "^0.2.0" + resize-observer-polyfill "^1.5.1" + throttle-debounce "^5.0.0" + "@babel/code-frame@7.10.4": version "7.10.4" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a" @@ -70,11 +118,24 @@ dependencies: "@babel/highlight" "^7.12.13" +"@babel/code-frame@^7.22.13", "@babel/code-frame@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.23.4.tgz#03ae5af150be94392cb5c7ccd97db5a19a5da6aa" + integrity sha512-r1IONyb6Ia+jYR2vvIDhdWdlTGhqbBoFqLTQidzZ4kepUFH15ejXvFHxCVbtl7BOXIudsIubf4E81xeA3h3IXA== + dependencies: + "@babel/highlight" "^7.23.4" + chalk "^2.4.2" + "@babel/compat-data@^7.12.1", "@babel/compat-data@^7.13.11", "@babel/compat-data@^7.13.15", "@babel/compat-data@^7.14.0": version "7.14.0" resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.14.0.tgz#a901128bce2ad02565df95e6ecbf195cf9465919" integrity sha512-vu9V3uMM/1o5Hl5OekMUowo3FqXLJSw+s+66nt0fSWVWTtmosdzn45JHOB3cPtZoe6CTBDzvSw0RdOY85Q37+Q== +"@babel/compat-data@^7.22.9": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.3.tgz#3febd552541e62b5e883a25eb3effd7c7379db11" + integrity sha512-BmR4bWbDIoFJmJ9z2cZ8Gmm2MXgEDgjdWgpKmKWUt54UGFJdlj31ECtbaDvCG/qVdG3AQ1SfpZEs01lUFbzLOQ== + "@babel/core@7.12.3": version "7.12.3" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.12.3.tgz#1b436884e1e3bff6fb1328dc02b208759de92ad8" @@ -118,6 +179,27 @@ semver "^6.3.0" source-map "^0.5.0" 
+"@babel/core@^7.11.6": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.23.3.tgz#5ec09c8803b91f51cc887dedc2654a35852849c9" + integrity sha512-Jg+msLuNuCJDyBvFv5+OKOUjWMZgd85bKjbICd3zWrKAo+bJ49HJufi7CQE0q0uR8NGyO6xkCACScNqyjHSZew== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.22.13" + "@babel/generator" "^7.23.3" + "@babel/helper-compilation-targets" "^7.22.15" + "@babel/helper-module-transforms" "^7.23.3" + "@babel/helpers" "^7.23.2" + "@babel/parser" "^7.23.3" + "@babel/template" "^7.22.15" + "@babel/traverse" "^7.23.3" + "@babel/types" "^7.23.3" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + "@babel/generator@^7.12.1", "@babel/generator@^7.14.2": version "7.14.2" resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.14.2.tgz#d5773e8b557d421fd6ce0d5efa5fd7fc22567c30" @@ -127,6 +209,16 @@ jsesc "^2.5.1" source-map "^0.5.0" +"@babel/generator@^7.23.3", "@babel/generator@^7.23.4", "@babel/generator@^7.7.2": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.4.tgz#4a41377d8566ec18f807f42962a7f3551de83d1c" + integrity sha512-esuS49Cga3HcThFNebGhlgsrVLkvhqvYDTzgjfFFlHJcIfLe5jFmRRfCQ1KuBfc4Jrtn3ndLgKWAKjBE+IraYQ== + dependencies: + "@babel/types" "^7.23.4" + "@jridgewell/gen-mapping" "^0.3.2" + "@jridgewell/trace-mapping" "^0.3.17" + jsesc "^2.5.1" + "@babel/helper-annotate-as-pure@^7.10.4", "@babel/helper-annotate-as-pure@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz#0f58e86dfc4bb3b1fcd7db806570e177d439b6ab" @@ -152,6 +244,17 @@ browserslist "^4.14.5" semver "^6.3.0" +"@babel/helper-compilation-targets@^7.22.15": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz#0698fc44551a26cf29f18d4662d5bf545a6cfc52" + 
integrity sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw== + dependencies: + "@babel/compat-data" "^7.22.9" + "@babel/helper-validator-option" "^7.22.15" + browserslist "^4.21.9" + lru-cache "^5.1.1" + semver "^6.3.1" + "@babel/helper-create-class-features-plugin@^7.12.1", "@babel/helper-create-class-features-plugin@^7.13.0", "@babel/helper-create-class-features-plugin@^7.14.0": version "7.14.2" resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.14.2.tgz#4e455b0329af29c2d3ad254b5dd5aed34595385d" @@ -186,6 +289,11 @@ resolve "^1.14.2" semver "^6.1.2" +"@babel/helper-environment-visitor@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167" + integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA== + "@babel/helper-explode-assignable-expression@^7.12.13": version "7.13.0" resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz#17b5c59ff473d9f956f40ef570cf3a76ca12657f" @@ -202,6 +310,14 @@ "@babel/template" "^7.12.13" "@babel/types" "^7.14.2" +"@babel/helper-function-name@^7.23.0": + version "7.23.0" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759" + integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw== + dependencies: + "@babel/template" "^7.22.15" + "@babel/types" "^7.23.0" + "@babel/helper-get-function-arity@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz#bc63451d403a3b3082b97e1d8b3fe5bd4091e583" @@ -217,6 +333,13 @@ "@babel/traverse" "^7.13.15" 
"@babel/types" "^7.13.16" +"@babel/helper-hoist-variables@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" + integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw== + dependencies: + "@babel/types" "^7.22.5" + "@babel/helper-member-expression-to-functions@^7.13.12": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz#dfe368f26d426a07299d8d6513821768216e6d72" @@ -231,6 +354,13 @@ dependencies: "@babel/types" "^7.13.12" +"@babel/helper-module-imports@^7.22.15": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz#16146307acdc40cc00c3b2c647713076464bdbf0" + integrity sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w== + dependencies: + "@babel/types" "^7.22.15" + "@babel/helper-module-transforms@^7.12.1", "@babel/helper-module-transforms@^7.13.0", "@babel/helper-module-transforms@^7.14.0", "@babel/helper-module-transforms@^7.14.2": version "7.14.2" resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.14.2.tgz#ac1cc30ee47b945e3e0c4db12fa0c5389509dfe5" @@ -245,6 +375,17 @@ "@babel/traverse" "^7.14.2" "@babel/types" "^7.14.2" +"@babel/helper-module-transforms@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz#d7d12c3c5d30af5b3c0fcab2a6d5217773e2d0f1" + integrity sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-module-imports" "^7.22.15" + "@babel/helper-simple-access" "^7.22.5" + "@babel/helper-split-export-declaration" 
"^7.22.6" + "@babel/helper-validator-identifier" "^7.22.20" + "@babel/helper-optimise-call-expression@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz#5c02d171b4c8615b1e7163f888c1c81c30a2aaea" @@ -257,6 +398,11 @@ resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz#806526ce125aed03373bc416a828321e3a6a33af" integrity sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ== +"@babel/helper-plugin-utils@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz#dd7ee3735e8a313b9f7b05a773d892e88e6d7295" + integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg== + "@babel/helper-remap-async-to-generator@^7.13.0": version "7.13.0" resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz#376a760d9f7b4b2077a9dd05aa9c3927cadb2209" @@ -283,6 +429,13 @@ dependencies: "@babel/types" "^7.13.12" +"@babel/helper-simple-access@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz#4938357dc7d782b80ed6dbb03a0fba3d22b1d5de" + integrity sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w== + dependencies: + "@babel/types" "^7.22.5" + "@babel/helper-skip-transparent-expression-wrappers@^7.12.1": version "7.12.1" resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz#462dc63a7e435ade8468385c63d2b84cce4b3cbf" @@ -297,16 +450,38 @@ dependencies: "@babel/types" "^7.12.13" +"@babel/helper-split-export-declaration@^7.22.6": + version "7.22.6" + resolved 
"https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c" + integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-string-parser@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz#9478c707febcbbe1ddb38a3d91a2e054ae622d83" + integrity sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ== + "@babel/helper-validator-identifier@^7.12.11", "@babel/helper-validator-identifier@^7.14.0": version "7.14.0" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.0.tgz#d26cad8a47c65286b15df1547319a5d0bcf27288" integrity sha512-V3ts7zMSu5lfiwWDVWzRDGIN+lnCEUdaXgtVHJgLb1rGaA6jMrtB9EmE7L18foXJIE8Un/A/h6NJfGQp/e1J4A== +"@babel/helper-validator-identifier@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" + integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== + "@babel/helper-validator-option@^7.12.1", "@babel/helper-validator-option@^7.12.17": version "7.12.17" resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz#d1fbf012e1a79b7eebbfdc6d270baaf8d9eb9831" integrity sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw== +"@babel/helper-validator-option@^7.22.15": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz#694c30dfa1d09a6534cdfcafbe56789d36aba040" + integrity 
sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA== + "@babel/helper-wrap-function@^7.13.0": version "7.13.0" resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz#bdb5c66fda8526ec235ab894ad53a1235c79fcc4" @@ -326,6 +501,15 @@ "@babel/traverse" "^7.14.0" "@babel/types" "^7.14.0" +"@babel/helpers@^7.23.2": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.23.4.tgz#7d2cfb969aa43222032193accd7329851facf3c1" + integrity sha512-HfcMizYz10cr3h29VqyfGL6ZWIjTwWfvYBMsBVGwpcbhNGe3wQ1ZXZRPzZoAHhd9OqHadHqjQ89iVKINXnbzuw== + dependencies: + "@babel/template" "^7.22.15" + "@babel/traverse" "^7.23.4" + "@babel/types" "^7.23.4" + "@babel/highlight@^7.10.4", "@babel/highlight@^7.12.13": version "7.14.0" resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.14.0.tgz#3197e375711ef6bf834e67d0daec88e4f46113cf" @@ -335,11 +519,25 @@ chalk "^2.0.0" js-tokens "^4.0.0" +"@babel/highlight@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.23.4.tgz#edaadf4d8232e1a961432db785091207ead0621b" + integrity sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A== + dependencies: + "@babel/helper-validator-identifier" "^7.22.20" + chalk "^2.4.2" + js-tokens "^4.0.0" + "@babel/parser@^7.1.0", "@babel/parser@^7.12.13", "@babel/parser@^7.12.3", "@babel/parser@^7.14.2", "@babel/parser@^7.7.0": version "7.14.2" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.14.2.tgz#0c1680aa44ad4605b16cbdcc5c341a61bde9c746" integrity sha512-IoVDIHpsgE/fu7eXBeRWt8zLbDrSvD7H1gpomOkPpBoEN8KCruCqSDdqo8dddwQQrui30KSvQBaMUOJiuFu6QQ== +"@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.22.15", "@babel/parser@^7.23.3", "@babel/parser@^7.23.4": + version "7.23.4" + resolved 
"https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.4.tgz#409fbe690c333bb70187e2de4021e1e47a026661" + integrity sha512-vf3Xna6UEprW+7t6EtOmFpHNAuxw3xqPZghy+brsnusscJRW5BMUzzHZc5ICjULee81WeUV2jjakG09MDglJXQ== + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.13.12": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz#a3484d84d0b549f3fc916b99ee4783f26fabad2a" @@ -595,6 +793,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" +"@babel/plugin-syntax-jsx@^7.7.2": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz#8f2e4f8a9b5f9aa16067e142c1ac9cd9f810f473" + integrity sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-logical-assignment-operators@^7.10.4", "@babel/plugin-syntax-logical-assignment-operators@^7.8.3": version "7.10.4" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699" @@ -658,6 +863,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" +"@babel/plugin-syntax-typescript@^7.7.2": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz#24f460c85dbbc983cd2b9c4994178bcc01df958f" + integrity sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-transform-arrow-functions@^7.12.1", "@babel/plugin-transform-arrow-functions@^7.13.0": version "7.13.0" resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz#10a59bebad52d637a027afa692e8d5ceff5e3dae" @@ -1225,12 +1437,12 @@ dependencies: regenerator-runtime "^0.13.4" -"@babel/runtime@^7.4.2", "@babel/runtime@^7.8.7": - version "7.15.3" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.15.3.tgz#2e1c2880ca118e5b2f9988322bd8a7656a32502b" - integrity sha512-OvwMLqNXkCXSz1kSm58sEsNuhqOx/fKpnUnKnFB5v8uDda5bLNEHNgKPvhDN6IU0LDcnHQ90LlJ0Q6jnyBSIBA== +"@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.23.2": + version "7.23.5" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.5.tgz#11edb98f8aeec529b82b211028177679144242db" + integrity sha512-NdUTHcPe4C99WxPub+K9l9tK5/lV4UXIoaHSYgzco9BCyjKAAwzdBI+wWtYqHt7LJdbo74ZjRPJgzVweq1sz0w== dependencies: - regenerator-runtime "^0.13.4" + regenerator-runtime "^0.14.0" "@babel/template@^7.10.4", "@babel/template@^7.12.13", "@babel/template@^7.3.3": version "7.12.13" @@ -1241,6 +1453,15 @@ "@babel/parser" "^7.12.13" "@babel/types" "^7.12.13" +"@babel/template@^7.22.15": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.15.tgz#09576efc3830f0430f4548ef971dde1350ef2f38" + integrity sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w== + dependencies: + "@babel/code-frame" "^7.22.13" + "@babel/parser" "^7.22.15" + "@babel/types" "^7.22.15" + "@babel/traverse@^7.1.0", "@babel/traverse@^7.12.1", "@babel/traverse@^7.13.0", "@babel/traverse@^7.13.15", "@babel/traverse@^7.14.0", "@babel/traverse@^7.14.2", "@babel/traverse@^7.7.0": version "7.14.2" resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.14.2.tgz#9201a8d912723a831c2679c7ebbf2fe1416d765b" @@ -1255,6 +1476,22 @@ debug "^4.1.0" globals "^11.1.0" +"@babel/traverse@^7.23.3", "@babel/traverse@^7.23.4": + version "7.23.4" + resolved 
"https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.4.tgz#c2790f7edf106d059a0098770fe70801417f3f85" + integrity sha512-IYM8wSUwunWTB6tFC2dkKZhxbIjHoWemdK+3f8/wq8aKhbUscxD5MX72ubd90fxvFknaLPeGw5ycU84V1obHJg== + dependencies: + "@babel/code-frame" "^7.23.4" + "@babel/generator" "^7.23.4" + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-function-name" "^7.23.0" + "@babel/helper-hoist-variables" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + "@babel/parser" "^7.23.4" + "@babel/types" "^7.23.4" + debug "^4.1.0" + globals "^11.1.0" + "@babel/types@^7.0.0", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.12.6", "@babel/types@^7.13.0", "@babel/types@^7.13.12", "@babel/types@^7.13.16", "@babel/types@^7.14.0", "@babel/types@^7.14.2", "@babel/types@^7.3.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4", "@babel/types@^7.7.0": version "7.14.2" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.14.2.tgz#4208ae003107ef8a057ea8333e56eb64d2f6a2c3" @@ -1263,6 +1500,15 @@ "@babel/helper-validator-identifier" "^7.14.0" to-fast-properties "^2.0.0" +"@babel/types@^7.20.7", "@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.23.3", "@babel/types@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.4.tgz#7206a1810fc512a7f7f7d4dace4cb4c1c9dbfb8e" + integrity sha512-7uIFwVYpoplT5jp/kVv6EF93VaJ8H+Yn5IczYiaAi98ajzjfoZfslet/e0sLh+wVBjb2qqIut1b0S26VSafsSQ== + dependencies: + "@babel/helper-string-parser" "^7.23.4" + "@babel/helper-validator-identifier" "^7.22.20" + to-fast-properties "^2.0.0" + "@bcoe/v8-coverage@^0.2.3": version "0.2.3" resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" @@ -1291,6 +1537,11 @@ resolved "https://registry.yarnpkg.com/@ctrl/tinycolor/-/tinycolor-3.6.0.tgz#53fa5fe9c34faee89469e48f91d51a3766108bc8" integrity 
sha512-/Z3l6pXthq0JvMYdUFyX9j0MaCltlIn6mfh9jLyQwg5aPKxkyNa0PTHtU1AlFXLNk55ZuAeJRcpvq+tmLfKmaQ== +"@ctrl/tinycolor@^3.6.1": + version "3.6.1" + resolved "https://registry.yarnpkg.com/@ctrl/tinycolor/-/tinycolor-3.6.1.tgz#b6c75a56a1947cc916ea058772d666a2c8932f31" + integrity sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA== + "@discoveryjs/json-ext@^0.5.0": version "0.5.2" resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.2.tgz#8f03a22a04de437254e8ce8cc84ba39689288752" @@ -1326,10 +1577,15 @@ resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz#88da2b70d6ca18aaa6ed3687832e11f39e80624b" integrity sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ== -"@fortawesome/fontawesome-free@^5.8.1": - version "5.15.4" - resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-free/-/fontawesome-free-5.15.4.tgz#ecda5712b61ac852c760d8b3c79c96adca5554e5" - integrity sha512-eYm8vijH/hpzr/6/1CJ/V/Eb1xQFW2nnUKArb3z+yUWv7HTwj6M7SP957oMjfZjAHU6qpoNc2wQvIxBLWYa/Jg== +"@fortawesome/fontawesome-common-types@6.4.2": + version "6.4.2" + resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.2.tgz#1766039cad33f8ad87f9467b98e0d18fbc8f01c5" + integrity sha512-1DgP7f+XQIJbLFCTX1V2QnxVmpLdKdzzo2k8EmvDOePfchaIGQ9eCHj2up3/jNEbZuBqel5OxiaOJf37TWauRA== + +"@fortawesome/fontawesome-free@^6.2.1": + version "6.5.1" + resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-free/-/fontawesome-free-6.5.1.tgz#55cc8410abf1003b726324661ce5b0d1c10de258" + integrity sha512-CNy5vSwN3fsUStPRLX7fUYojyuzoEMSXPl7zSLJ8TgtRfjv24LOnOWKT2zYwaHZCJGkdyRnTmstR0P+Ah503Gw== "@fortawesome/fontawesome-svg-core@^6.4.0": version "6.4.0" @@ -1338,6 +1594,13 @@ dependencies: "@fortawesome/fontawesome-common-types" "6.4.0" +"@fortawesome/free-regular-svg-icons@^6.4.0": + version "6.4.2" + 
resolved "https://registry.yarnpkg.com/@fortawesome/free-regular-svg-icons/-/free-regular-svg-icons-6.4.2.tgz#aee79ed76ce5dd04931352f9d83700761b8b1b25" + integrity sha512-0+sIUWnkgTVVXVAPQmW4vxb9ZTHv0WstOa3rBx9iPxrrrDH6bNLsDYuwXF9b6fGm+iR7DKQvQshUH/FJm3ed9Q== + dependencies: + "@fortawesome/fontawesome-common-types" "6.4.2" + "@fortawesome/free-solid-svg-icons@^6.4.0": version "6.4.0" resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz#48c0e790847fa56299e2f26b82b39663b8ad7119" @@ -1412,6 +1675,18 @@ jest-util "^26.6.2" slash "^3.0.0" +"@jest/console@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.7.0.tgz#cd4822dbdb84529265c5a2bdb529a3c9cc950ffc" + integrity sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg== + dependencies: + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + slash "^3.0.0" + "@jest/core@^26.6.0", "@jest/core@^26.6.3": version "26.6.3" resolved "https://registry.yarnpkg.com/@jest/core/-/core-26.6.3.tgz#7639fcb3833d748a4656ada54bde193051e45fad" @@ -1446,6 +1721,40 @@ slash "^3.0.0" strip-ansi "^6.0.0" +"@jest/core@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.7.0.tgz#b6cccc239f30ff36609658c5a5e2291757ce448f" + integrity sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg== + dependencies: + "@jest/console" "^29.7.0" + "@jest/reporters" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + ansi-escapes "^4.2.1" + chalk "^4.0.0" + ci-info "^3.2.0" + exit "^0.1.2" + graceful-fs "^4.2.9" + jest-changed-files "^29.7.0" + jest-config "^29.7.0" + jest-haste-map "^29.7.0" + jest-message-util "^29.7.0" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-resolve-dependencies "^29.7.0" + 
jest-runner "^29.7.0" + jest-runtime "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + jest-watcher "^29.7.0" + micromatch "^4.0.4" + pretty-format "^29.7.0" + slash "^3.0.0" + strip-ansi "^6.0.0" + "@jest/environment@^26.6.0", "@jest/environment@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-26.6.2.tgz#ba364cc72e221e79cc8f0a99555bf5d7577cf92c" @@ -1456,6 +1765,31 @@ "@types/node" "*" jest-mock "^26.6.2" +"@jest/environment@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.7.0.tgz#24d61f54ff1f786f3cd4073b4b94416383baf2a7" + integrity sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw== + dependencies: + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + jest-mock "^29.7.0" + +"@jest/expect-utils@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.7.0.tgz#023efe5d26a8a70f21677d0a1afc0f0a44e3a1c6" + integrity sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA== + dependencies: + jest-get-type "^29.6.3" + +"@jest/expect@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.7.0.tgz#76a3edb0cb753b70dfbfe23283510d3d45432bf2" + integrity sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ== + dependencies: + expect "^29.7.0" + jest-snapshot "^29.7.0" + "@jest/fake-timers@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-26.6.2.tgz#459c329bcf70cee4af4d7e3f3e67848123535aad" @@ -1468,6 +1802,18 @@ jest-mock "^26.6.2" jest-util "^26.6.2" +"@jest/fake-timers@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.7.0.tgz#fd91bf1fffb16d7d0d24a426ab1a47a49881a565" + integrity 
sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ== + dependencies: + "@jest/types" "^29.6.3" + "@sinonjs/fake-timers" "^10.0.2" + "@types/node" "*" + jest-message-util "^29.7.0" + jest-mock "^29.7.0" + jest-util "^29.7.0" + "@jest/globals@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-26.6.2.tgz#5b613b78a1aa2655ae908eba638cc96a20df720a" @@ -1477,6 +1823,16 @@ "@jest/types" "^26.6.2" expect "^26.6.2" +"@jest/globals@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.7.0.tgz#8d9290f9ec47ff772607fa864ca1d5a2efae1d4d" + integrity sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/expect" "^29.7.0" + "@jest/types" "^29.6.3" + jest-mock "^29.7.0" + "@jest/reporters@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-26.6.2.tgz#1f518b99637a5f18307bd3ecf9275f6882a667f6" @@ -1509,6 +1865,43 @@ optionalDependencies: node-notifier "^8.0.0" +"@jest/reporters@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.7.0.tgz#04b262ecb3b8faa83b0b3d321623972393e8f4c7" + integrity sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg== + dependencies: + "@bcoe/v8-coverage" "^0.2.3" + "@jest/console" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@jridgewell/trace-mapping" "^0.3.18" + "@types/node" "*" + chalk "^4.0.0" + collect-v8-coverage "^1.0.0" + exit "^0.1.2" + glob "^7.1.3" + graceful-fs "^4.2.9" + istanbul-lib-coverage "^3.0.0" + istanbul-lib-instrument "^6.0.0" + istanbul-lib-report "^3.0.0" + istanbul-lib-source-maps "^4.0.0" + istanbul-reports "^3.1.3" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + jest-worker "^29.7.0" + slash "^3.0.0" + string-length 
"^4.0.1" + strip-ansi "^6.0.0" + v8-to-istanbul "^9.0.1" + +"@jest/schemas@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03" + integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA== + dependencies: + "@sinclair/typebox" "^0.27.8" + "@jest/source-map@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-26.6.2.tgz#29af5e1e2e324cafccc936f218309f54ab69d535" @@ -1518,6 +1911,15 @@ graceful-fs "^4.2.4" source-map "^0.6.0" +"@jest/source-map@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.6.3.tgz#d90ba772095cf37a34a5eb9413f1b562a08554c4" + integrity sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw== + dependencies: + "@jridgewell/trace-mapping" "^0.3.18" + callsites "^3.0.0" + graceful-fs "^4.2.9" + "@jest/test-result@^26.6.0", "@jest/test-result@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-26.6.2.tgz#55da58b62df134576cc95476efa5f7949e3f5f18" @@ -1528,6 +1930,16 @@ "@types/istanbul-lib-coverage" "^2.0.0" collect-v8-coverage "^1.0.0" +"@jest/test-result@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.7.0.tgz#8db9a80aa1a097bb2262572686734baed9b1657c" + integrity sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA== + dependencies: + "@jest/console" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/istanbul-lib-coverage" "^2.0.0" + collect-v8-coverage "^1.0.0" + "@jest/test-sequencer@^26.6.3": version "26.6.3" resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-26.6.3.tgz#98e8a45100863886d074205e8ffdc5a7eb582b17" @@ -1539,6 +1951,16 @@ jest-runner "^26.6.3" jest-runtime "^26.6.3" +"@jest/test-sequencer@^29.7.0": + 
version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz#6cef977ce1d39834a3aea887a1726628a6f072ce" + integrity sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw== + dependencies: + "@jest/test-result" "^29.7.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + slash "^3.0.0" + "@jest/transform@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-26.6.2.tgz#5ac57c5fa1ad17b2aae83e73e45813894dcf2e4b" @@ -1560,6 +1982,27 @@ source-map "^0.6.1" write-file-atomic "^3.0.0" +"@jest/transform@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.7.0.tgz#df2dd9c346c7d7768b8a06639994640c642e284c" + integrity sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw== + dependencies: + "@babel/core" "^7.11.6" + "@jest/types" "^29.6.3" + "@jridgewell/trace-mapping" "^0.3.18" + babel-plugin-istanbul "^6.1.1" + chalk "^4.0.0" + convert-source-map "^2.0.0" + fast-json-stable-stringify "^2.1.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-regex-util "^29.6.3" + jest-util "^29.7.0" + micromatch "^4.0.4" + pirates "^4.0.4" + slash "^3.0.0" + write-file-atomic "^4.0.2" + "@jest/types@^26.6.0", "@jest/types@^26.6.2": version "26.6.2" resolved "https://registry.yarnpkg.com/@jest/types/-/types-26.6.2.tgz#bef5a532030e1d88a2f5a6d933f84e97226ed48e" @@ -1571,6 +2014,50 @@ "@types/yargs" "^15.0.0" chalk "^4.0.0" +"@jest/types@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.3.tgz#1131f8cf634e7e84c5e77bab12f052af585fba59" + integrity sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw== + dependencies: + "@jest/schemas" "^29.6.3" + "@types/istanbul-lib-coverage" "^2.0.0" + "@types/istanbul-reports" "^3.0.0" + "@types/node" "*" + "@types/yargs" "^17.0.8" + chalk "^4.0.0" + 
+"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2": + version "0.3.3" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz#7e02e6eb5df901aaedb08514203b096614024098" + integrity sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ== + dependencies: + "@jridgewell/set-array" "^1.0.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" + +"@jridgewell/resolve-uri@^3.1.0": + version "3.1.1" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz#c08679063f279615a3326583ba3a90d1d82cc721" + integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA== + +"@jridgewell/set-array@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" + integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== + +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": + version "1.4.15" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" + integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== + +"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.9": + version "0.3.20" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz#72e45707cf240fa6b081d0366f8265b0cd10197f" + integrity sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q== + dependencies: + "@jridgewell/resolve-uri" "^3.1.0" + "@jridgewell/sourcemap-codec" "^1.4.14" + "@nodelib/fs.scandir@2.1.4": version "2.1.4" resolved 
"https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz#d4b3549a5db5de2683e0c1071ab4f140904bbf69" @@ -1617,6 +2104,16 @@ resolved "https://registry.yarnpkg.com/@polka/url/-/url-1.0.0-next.12.tgz#431ec342a7195622f86688bbda82e3166ce8cb28" integrity sha512-6RglhutqrGFMO1MNUXp95RBuYIuc8wTnMAV5MUhLmjTOy78ncwOw7RgeQ/HeymkKXRhZd0s2DNrM1rL7unk3MQ== +"@rc-component/color-picker@~1.4.1": + version "1.4.1" + resolved "https://registry.yarnpkg.com/@rc-component/color-picker/-/color-picker-1.4.1.tgz#dcab0b660e9c4ed63a7582db68ed4a77c862cb93" + integrity sha512-vh5EWqnsayZa/JwUznqDaPJz39jznx/YDbyBuVJntv735tKXKwEUZZb2jYEldOg+NKWZwtALjGMrNeGBmqFoEw== + dependencies: + "@babel/runtime" "^7.10.1" + "@ctrl/tinycolor" "^3.6.0" + classnames "^2.2.6" + rc-util "^5.30.0" + "@rc-component/context@^1.3.0": version "1.3.0" resolved "https://registry.yarnpkg.com/@rc-component/context/-/context-1.3.0.tgz#608ccf0abcbec9406751b17a4b35db08e481c110" @@ -1625,6 +2122,14 @@ "@babel/runtime" "^7.10.1" rc-util "^5.27.0" +"@rc-component/context@^1.4.0": + version "1.4.0" + resolved "https://registry.yarnpkg.com/@rc-component/context/-/context-1.4.0.tgz#dc6fb021d6773546af8f016ae4ce9aea088395e8" + integrity sha512-kFcNxg9oLRMoL3qki0OMxK+7g5mypjgaaJp/pkOis/6rVxma9nJBF/8kCIuTYHUQNr0ii7MxqE33wirPZLJQ2w== + dependencies: + "@babel/runtime" "^7.10.1" + rc-util "^5.27.0" + "@rc-component/mini-decimal@^1.0.1": version "1.0.1" resolved "https://registry.yarnpkg.com/@rc-component/mini-decimal/-/mini-decimal-1.0.1.tgz#e5dbc20a6a5b0e234d279bc71ce730ab865d3910" @@ -1641,6 +2146,15 @@ classnames "^2.3.2" rc-util "^5.24.4" +"@rc-component/mutate-observer@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@rc-component/mutate-observer/-/mutate-observer-1.1.0.tgz#ee53cc88b78aade3cd0653609215a44779386fd8" + integrity sha512-QjrOsDXQusNwGZPf4/qRQasg7UFEj06XiCJ8iuiq/Io7CrHrgVi6Uuetw60WAMG1799v+aM8kyc+1L/GBbHSlw== + dependencies: + "@babel/runtime" "^7.18.0" + classnames "^2.3.2" + rc-util 
"^5.24.4" + "@rc-component/portal@^1.0.0-6", "@rc-component/portal@^1.0.0-8", "@rc-component/portal@^1.0.0-9", "@rc-component/portal@^1.0.2", "@rc-component/portal@^1.1.0": version "1.1.1" resolved "https://registry.yarnpkg.com/@rc-component/portal/-/portal-1.1.1.tgz#1a30ffe51c240b54360cba8e8bfc5d1f559325c4" @@ -1650,6 +2164,26 @@ classnames "^2.3.2" rc-util "^5.24.4" +"@rc-component/portal@^1.1.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@rc-component/portal/-/portal-1.1.2.tgz#55db1e51d784e034442e9700536faaa6ab63fc71" + integrity sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg== + dependencies: + "@babel/runtime" "^7.18.0" + classnames "^2.3.2" + rc-util "^5.24.4" + +"@rc-component/tour@~1.10.0": + version "1.10.0" + resolved "https://registry.yarnpkg.com/@rc-component/tour/-/tour-1.10.0.tgz#b05bc327438f1c583439e2d2dcc10ec0530aea19" + integrity sha512-voV0BKaTJbewB9LLgAHQ7tAGG7rgDkKQkZo82xw2gIk542hY+o7zwoqdN16oHhIKk7eG/xi+mdXrONT62Dt57A== + dependencies: + "@babel/runtime" "^7.18.0" + "@rc-component/portal" "^1.0.0-9" + "@rc-component/trigger" "^1.3.6" + classnames "^2.3.2" + rc-util "^5.24.4" + "@rc-component/tour@~1.8.0": version "1.8.0" resolved "https://registry.yarnpkg.com/@rc-component/tour/-/tour-1.8.0.tgz#fda8b533e36db1d4254e3ffbcefe3395c346eb1c" @@ -1674,6 +2208,18 @@ rc-resize-observer "^1.3.1" rc-util "^5.29.2" +"@rc-component/trigger@^1.17.0", "@rc-component/trigger@^1.18.0", "@rc-component/trigger@^1.18.2": + version "1.18.2" + resolved "https://registry.yarnpkg.com/@rc-component/trigger/-/trigger-1.18.2.tgz#dc52c4c66fa8aaccaf0710498f2429fc05454e3b" + integrity sha512-jRLYgFgjLEPq3MvS87fIhcfuywFSRDaDrYw1FLku7Cm4esszvzTbA0JBsyacAyLrK9rF3TiHFcvoEDMzoD3CTA== + dependencies: + "@babel/runtime" "^7.23.2" + "@rc-component/portal" "^1.1.0" + classnames "^2.3.2" + rc-motion "^2.0.0" + rc-resize-observer "^1.3.1" + rc-util "^5.38.0" + "@rollup/plugin-node-resolve@^7.1.1": version "7.1.3" resolved 
"https://registry.yarnpkg.com/@rollup/plugin-node-resolve/-/plugin-node-resolve-7.1.3.tgz#80de384edfbd7bfc9101164910f86078151a3eca" @@ -1702,6 +2248,11 @@ estree-walker "^1.0.1" picomatch "^2.2.2" +"@sinclair/typebox@^0.27.8": + version "0.27.8" + resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" + integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== + "@sinonjs/commons@^1.7.0": version "1.8.3" resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.3.tgz#3802ddd21a50a949b6721ddd72da36e67e7f1b2d" @@ -1709,6 +2260,20 @@ dependencies: type-detect "4.0.8" +"@sinonjs/commons@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.0.tgz#beb434fe875d965265e04722ccfc21df7f755d72" + integrity sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA== + dependencies: + type-detect "4.0.8" + +"@sinonjs/fake-timers@^10.0.2": + version "10.3.0" + resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz#55fdff1ecab9f354019129daf4df0dd4d923ea66" + integrity sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA== + dependencies: + "@sinonjs/commons" "^3.0.0" + "@sinonjs/fake-timers@^6.0.1": version "6.0.1" resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz#293674fccb3262ac782c7aadfdeca86b10c75c40" @@ -1870,13 +2435,13 @@ dependencies: "@babel/runtime" "^7.12.5" -"@tippy.js/react@^2.1.2": - version "2.2.3" - resolved "https://registry.yarnpkg.com/@tippy.js/react/-/react-2.2.3.tgz#2ffb0af6693055be7db4b329b2d3cc7f2356f68e" - integrity sha512-5XYvbQujzDj9r00JYEz/cBtm6DutjOdv2azdco53B+eWF7FDBCQfkLVn87wimfEpmGK0vqRQv/cwFxFcoOP98Q== +"@tippy.js/react@^3.1.1": + version "3.1.1" + resolved 
"https://registry.yarnpkg.com/@tippy.js/react/-/react-3.1.1.tgz#027e4595e55f31430741fe8e0d92aaddfbe47efd" + integrity sha512-KF45vW/jKh/nBXk/2zzTFslv/T46zOMkIoDJ56ymZ+M00yHttk58J5wZ29oqGqDIUnobWSZD+cFpbR4u/UUvgw== dependencies: prop-types "^15.6.2" - tippy.js "^4.3.4" + tippy.js "^5.1.1" "@types/anymatch@*": version "1.3.1" @@ -1899,6 +2464,17 @@ "@types/babel__template" "*" "@types/babel__traverse" "*" +"@types/babel__core@^7.1.14": + version "7.20.5" + resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.5.tgz#3df15f27ba85319caa07ba08d0721889bb39c017" + integrity sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA== + dependencies: + "@babel/parser" "^7.20.7" + "@babel/types" "^7.20.7" + "@types/babel__generator" "*" + "@types/babel__template" "*" + "@types/babel__traverse" "*" + "@types/babel__generator@*": version "7.6.2" resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.2.tgz#f3d71178e187858f7c45e30380f8f1b7415a12d8" @@ -1926,6 +2502,57 @@ resolved "https://registry.yarnpkg.com/@types/classnames/-/classnames-2.2.9.tgz#d868b6febb02666330410fe7f58f3c4b8258be7b" integrity sha512-MNl+rT5UmZeilaPxAVs6YaPC2m6aA8rofviZbhbxpPpl61uKodfdQVsBtgJGTqGizEf02oW3tsVe7FYB8kK14A== +"@types/d3-array@^3.0.3": + version "3.0.9" + resolved "https://registry.yarnpkg.com/@types/d3-array/-/d3-array-3.0.9.tgz#54feabd29d1f15940d422c16008c63c1e4e3d188" + integrity sha512-mZowFN3p64ajCJJ4riVYlOjNlBJv3hctgAY01pjw3qTnJePD8s9DZmYDzhHKvzfCYvdjwylkU38+Vdt7Cu2FDA== + +"@types/d3-color@*": + version "3.1.2" + resolved "https://registry.yarnpkg.com/@types/d3-color/-/d3-color-3.1.2.tgz#7939eed011a908287cd1bcfd11580c17b2ac7f8a" + integrity sha512-At+Ski7dL8Bs58E8g8vPcFJc8tGcaC12Z4m07+p41+DRqnZQcAlp3NfYjLrhNYv+zEyQitU1CUxXNjqUyf+c0g== + +"@types/d3-ease@^3.0.0": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@types/d3-ease/-/d3-ease-3.0.1.tgz#ef386d2f28602dba82206888047f97f7f7f7558a" + 
integrity sha512-VZofjpEt8HWv3nxUAosj5o/+4JflnJ7Bbv07k17VO3T2WRuzGdZeookfaF60iVh5RdhVG49LE5w6LIshVUC6rg== + +"@types/d3-interpolate@^3.0.1": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/d3-interpolate/-/d3-interpolate-3.0.3.tgz#e10c06c4bf11bd770ed56184a0d76cd516ff4ded" + integrity sha512-6OZ2EIB4lLj+8cUY7I/Cgn9Q+hLdA4DjJHYOQDiHL0SzqS1K9DL5xIOVBSIHgF+tiuO9MU1D36qvdIvRDRPh+Q== + dependencies: + "@types/d3-color" "*" + +"@types/d3-path@*": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@types/d3-path/-/d3-path-3.0.1.tgz#6171c9e388904014764661a37613e3c4ab8df22d" + integrity sha512-blRhp7ki7pVznM8k6lk5iUU9paDbVRVq+/xpf0RRgSJn5gr6SE7RcFtxooYGMBOc1RZiGyqRpVdu5AD0z0ooMA== + +"@types/d3-scale@^4.0.2": + version "4.0.6" + resolved "https://registry.yarnpkg.com/@types/d3-scale/-/d3-scale-4.0.6.tgz#9d221949f37b90b52696ec99f9b1e972d55fe10d" + integrity sha512-lo3oMLSiqsQUovv8j15X4BNEDOsnHuGjeVg7GRbAuB2PUa1prK5BNSOu6xixgNf3nqxPl4I1BqJWrPvFGlQoGQ== + dependencies: + "@types/d3-time" "*" + +"@types/d3-shape@^3.1.0": + version "3.1.4" + resolved "https://registry.yarnpkg.com/@types/d3-shape/-/d3-shape-3.1.4.tgz#748a256d5e499cdfb3e48beca9c557f3ea0ff15c" + integrity sha512-M2/xsWPsjaZc5ifMKp1EBp0gqJG0eO/zlldJNOC85Y/5DGsBQ49gDkRJ2h5GY7ZVD6KUumvZWsylSbvTaJTqKg== + dependencies: + "@types/d3-path" "*" + +"@types/d3-time@*", "@types/d3-time@^3.0.0": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@types/d3-time/-/d3-time-3.0.2.tgz#f4425b2ebcb04495a7b2390da03633ef1a8adbe5" + integrity sha512-kbdRXTmUgNfw5OTE3KZnFQn6XdIc4QGroN5UixgdrXATmYsdlPQS6pEut9tVlIojtzuFD4txs/L+Rq41AHtLpg== + +"@types/d3-timer@^3.0.0": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@types/d3-timer/-/d3-timer-3.0.1.tgz#8dac23292df0e559a3aa459d8efca78a734c3fbe" + integrity sha512-GGTvzKccVEhxmRfJEB6zhY9ieT4UhGVUIQaBzFpUO9OXy2ycAlnPCSJLzmGGgqt3KVjqN3QCQB4g1rsZnHsWhg== + "@types/eslint@^7.2.6": version "7.2.10" resolved 
"https://registry.yarnpkg.com/@types/eslint/-/eslint-7.2.10.tgz#4b7a9368d46c0f8cd5408c23288a59aa2394d917" @@ -1959,6 +2586,13 @@ dependencies: "@types/node" "*" +"@types/graceful-fs@^4.1.3": + version "4.1.9" + resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz#2a06bc0f68a20ab37b3e36aa238be6abdf49e8b4" + integrity sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ== + dependencies: + "@types/node" "*" + "@types/history@*": version "4.7.8" resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.8.tgz#49348387983075705fe8f4e02fb67f7daaec4934" @@ -1974,26 +2608,31 @@ resolved "https://registry.yarnpkg.com/@types/html-minifier-terser/-/html-minifier-terser-5.1.1.tgz#3c9ee980f1a10d6021ae6632ca3e79ca2ec4fb50" integrity sha512-giAlZwstKbmvMk1OO7WXSj4OZ0keXAcl2TQq4LWHiiPH2ByaH7WeUzng+Qej8UPxxv+8lRTuouo0iaNDBuzIBA== -"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": +"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0": + version "2.0.6" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" + integrity sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w== + +"@types/istanbul-lib-coverage@^2.0.1": version "2.0.3" resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.3.tgz#4ba8ddb720221f432e443bd5f9117fd22cfd4762" integrity sha512-sz7iLqvVUg1gIedBOvlkxPlc8/uVzyS5OwGz1cKjXzkl3FpL3al0crU8YGU1WoHkxn0Wxbw5tyi6hvzJKNzFsw== "@types/istanbul-lib-report@*": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#c14c24f18ea8190c118ee7562b7ff99a36552686" - integrity sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg== + version "3.0.3" + 
resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz#53047614ae72e19fc0401d872de3ae2b4ce350bf" + integrity sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA== dependencies: "@types/istanbul-lib-coverage" "*" "@types/istanbul-reports@^3.0.0": - version "3.0.0" - resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.0.tgz#508b13aa344fa4976234e75dddcc34925737d821" - integrity sha512-nwKNbvnwJ2/mndE9ItP/zc2TCzw6uuodnF4EHYWD+gCQDVBuRQL5UzbZD0/ezy1iKsFU2ZQiDqg4M9dN4+wZgA== + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz#0f03e3d2f670fbdac586e34b433783070cc16f54" + integrity sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ== dependencies: "@types/istanbul-lib-report" "*" -"@types/jest@*", "@types/jest@^26.0.15": +"@types/jest@*": version "26.0.23" resolved "https://registry.yarnpkg.com/@types/jest/-/jest-26.0.23.tgz#a1b7eab3c503b80451d019efb588ec63522ee4e7" integrity sha512-ZHLmWMJ9jJ9PTiT58juykZpL7KjwJywFN3Rr2pTSkyQfydf/rk22yS7W8p5DaVUMQ2BQC7oYiU3FjbTM/mYrOA== @@ -2001,6 +2640,14 @@ jest-diff "^26.0.0" pretty-format "^26.0.0" +"@types/jest@^29.5.10": + version "29.5.10" + resolved "https://registry.yarnpkg.com/@types/jest/-/jest-29.5.10.tgz#a10fc5bab9e426081c12b2ef73d24d4f0c9b7f50" + integrity sha512-tE4yxKEphEyxj9s4inideLHktW/x6DwesIwWZ9NN1FKf9zbJYsnhBoA9vrHA/IuIOKwPa5PcFBNV4lpMIOEzyQ== + dependencies: + expect "^29.0.0" + pretty-format "^29.0.0" + "@types/json-schema@*", "@types/json-schema@^7.0.3", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.6": version "7.0.7" resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.7.tgz#98a993516c859eb0d5c4c8f098317a9ea68db9ad" @@ -2017,9 +2664,11 @@ integrity sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA== "@types/node@*": - 
version "15.0.3" - resolved "https://registry.yarnpkg.com/@types/node/-/node-15.0.3.tgz#ee09fcaac513576474c327da5818d421b98db88a" - integrity sha512-/WbxFeBU+0F79z9RdEOXH4CsDga+ibi5M8uEYr91u3CkT/pdWcV8MCook+4wDPnZBexRdwWS+PiVZ2xJviAzcQ== + version "20.10.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.10.0.tgz#16ddf9c0a72b832ec4fcce35b8249cf149214617" + integrity sha512-D0WfRmU9TQ8I9PFx9Yc+EBHw+vSpIub4IDvQivcp26PtPrdMGAq5SDcpXEo/epqa/DXotVpekHiLNTg3iaKXBQ== + dependencies: + undici-types "~5.26.4" "@types/node@^12.0.0": version "12.20.13" @@ -2051,10 +2700,10 @@ resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.4.tgz#15925414e0ad2cd765bfef58842f7e26a7accb24" integrity sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug== -"@types/react-autocomplete@^1.8.5": - version "1.8.6" - resolved "https://registry.yarnpkg.com/@types/react-autocomplete/-/react-autocomplete-1.8.6.tgz#999d0cd10ac4164ea605da04d263e5fb3fd5745f" - integrity sha512-v3MOyT7gfMfu0K1Y6n9inGVO325evf96YY3Aw4i4WJs5f9+7Y/TVMPbNXoXRnbhlubgkqel19VnrCdBSodwOEg== +"@types/react-autocomplete@^1.8.4": + version "1.8.9" + resolved "https://registry.yarnpkg.com/@types/react-autocomplete/-/react-autocomplete-1.8.9.tgz#f7062603878a1f206584d0acbb9b8ac8620cd1b7" + integrity sha512-Il8qJEKvPU0uW+HOPiRHIKxGF61RM6cM5tEnZDmM5ek78OK5kfv04AZbNyqdPsuTnwp8HIRgBnQH2RhgKILjcg== dependencies: "@types/react" "*" @@ -2066,9 +2715,9 @@ "@types/react" "^16" "@types/react-form@^2.16.1": - version "2.16.4" - resolved "https://registry.yarnpkg.com/@types/react-form/-/react-form-2.16.4.tgz#e31790e6fe9fb90765e87c3a215f28f04814d171" - integrity sha512-Qj91wZVqqjdeGdMOPlFR91I6E241qnflP0p7nbuX0xJFgy1Rqrae1MtTg3wXGYqOxqF8DQhTTTF9oeVB7Ns8zg== + version "2.16.12" + resolved "https://registry.yarnpkg.com/@types/react-form/-/react-form-2.16.12.tgz#17e28e7b797fe9b9e5235f1a63c845753f987793" + integrity 
sha512-qVTmFkboa84RMArBZ+aYelpvMuKwciEOIOYbcsOJukb633wWjenWZGfB607LgzcGNaqGoP7Dupg/FmXz2eRcJg== dependencies: "@types/react" "*" @@ -2140,6 +2789,11 @@ dependencies: source-map "^0.6.1" +"@types/uuid@^9.0.3": + version "9.0.7" + resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.7.tgz#b14cebc75455eeeb160d5fe23c2fcc0c64f724d8" + integrity sha512-WUtIVRUZ9i5dYXefDEAI7sh9/O7jGvHg7Df/5O/gtH3Yabe5odI3UWopVR1qbPXQtvOxWu3mM4XxlYeZtMWF4g== + "@types/webpack-sources@*": version "2.1.0" resolved "https://registry.yarnpkg.com/@types/webpack-sources/-/webpack-sources-2.1.0.tgz#8882b0bd62d1e0ce62f183d0d01b72e6e82e8c10" @@ -2162,14 +2816,21 @@ source-map "^0.6.0" "@types/yargs-parser@*": - version "20.2.0" - resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-20.2.0.tgz#dd3e6699ba3237f0348cd085e4698780204842f9" - integrity sha512-37RSHht+gzzgYeobbG+KWryeAW8J33Nhr69cjTqSYymXVZEN9NbRYWoYlRtDhHKPVT1FyNKwaTPC1NynKZpzRA== + version "21.0.3" + resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15" + integrity sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ== "@types/yargs@^15.0.0": - version "15.0.13" - resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-15.0.13.tgz#34f7fec8b389d7f3c1fd08026a5763e072d3c6dc" - integrity sha512-kQ5JNTrbDv3Rp5X2n/iUu37IJBDU2gsZ5R/g1/KHOOEc5IKfUFjXT6DENPGduh08I/pamwtEq4oul7gUqKTQDQ== + version "15.0.19" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-15.0.19.tgz#328fb89e46109ecbdb70c295d96ff2f46dfd01b9" + integrity sha512-2XUaGVmyQjgyAZldf0D0c14vvo/yv0MhQBSTJcejMMaitsn3nxCB6TmH4G0ZQf+uxROOa9mpanoSm8h6SG/1ZA== + dependencies: + "@types/yargs-parser" "*" + +"@types/yargs@^17.0.8": + version "17.0.32" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.32.tgz#030774723a2f7faafebf645f4e5a48371dca6229" + integrity 
sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog== dependencies: "@types/yargs-parser" "*" @@ -2591,10 +3252,10 @@ ansi-regex@^4.1.0: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== -ansi-regex@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75" - integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg== +ansi-regex@^5.0.0, ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== ansi-styles@^3.2.0, ansi-styles@^3.2.1: version "3.2.1" @@ -2610,6 +3271,11 @@ ansi-styles@^4.0.0, ansi-styles@^4.1.0: dependencies: color-convert "^2.0.1" +ansi-styles@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" + integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== + antd@^5.4.2: version "5.4.2" resolved "https://registry.yarnpkg.com/antd/-/antd-5.4.2.tgz#3923b96da76fc7276992e9fc0286ebb3a638e016" @@ -2664,6 +3330,60 @@ antd@^5.4.2: scroll-into-view-if-needed "^3.0.3" throttle-debounce "^5.0.0" +antd@^5.6.1: + version "5.11.5" + resolved "https://registry.yarnpkg.com/antd/-/antd-5.11.5.tgz#e1528aa9a10c035ce2921d487b96da2d07b4feaa" + integrity sha512-qB1YmvO4Zm4r48M0Ptxn7orpaXeMPSeTrrw6dAgtTYN+ysnWD/D/zlxFc5g73GywIzZ10XGqvNC+74A+HD0yeQ== + dependencies: + "@ant-design/colors" "^7.0.0" + "@ant-design/cssinjs" "^1.17.5" + "@ant-design/icons" "^5.2.6" + "@ant-design/react-slick" "~1.0.2" + 
"@babel/runtime" "^7.18.3" + "@ctrl/tinycolor" "^3.6.1" + "@rc-component/color-picker" "~1.4.1" + "@rc-component/mutate-observer" "^1.1.0" + "@rc-component/tour" "~1.10.0" + "@rc-component/trigger" "^1.18.2" + classnames "^2.3.2" + copy-to-clipboard "^3.3.3" + dayjs "^1.11.1" + qrcode.react "^3.1.0" + rc-cascader "~3.20.0" + rc-checkbox "~3.1.0" + rc-collapse "~3.7.1" + rc-dialog "~9.3.4" + rc-drawer "~6.5.2" + rc-dropdown "~4.1.0" + rc-field-form "~1.40.0" + rc-image "~7.5.1" + rc-input "~1.3.6" + rc-input-number "~8.4.0" + rc-mentions "~2.9.1" + rc-menu "~9.12.2" + rc-motion "^2.9.0" + rc-notification "~5.3.0" + rc-pagination "~3.7.0" + rc-picker "~3.14.6" + rc-progress "~3.5.1" + rc-rate "~2.12.0" + rc-resize-observer "^1.4.0" + rc-segmented "~2.2.2" + rc-select "~14.10.0" + rc-slider "~10.4.0" + rc-steps "~6.0.1" + rc-switch "~4.1.0" + rc-table "~7.36.0" + rc-tabs "~12.13.1" + rc-textarea "~1.5.3" + rc-tooltip "~6.1.2" + rc-tree "~5.8.2" + rc-tree-select "~5.15.0" + rc-upload "~4.3.5" + rc-util "^5.38.1" + scroll-into-view-if-needed "^3.1.0" + throttle-debounce "^5.0.0" + anymatch@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" @@ -2685,28 +3405,30 @@ aproba@^1.1.1: resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== -"argo-ui@git+https://github.com/argoproj/argo-ui.git": +"argo-ui@git+https://github.com/argoproj/argo-ui.git#5ff344ac9692c14dd108468bd3c020c3c75181cb": version "1.0.0" - resolved "git+https://github.com/argoproj/argo-ui.git#a7be8b3208549c7cf5a0b9531ca41af5614f0eda" + resolved "git+https://github.com/argoproj/argo-ui.git#5ff344ac9692c14dd108468bd3c020c3c75181cb" dependencies: - "@fortawesome/fontawesome-free" "^5.8.1" - "@tippy.js/react" "^2.1.2" - "@types/react-autocomplete" "^1.8.5" - 
"@types/react-form" "^2.16.1" - "@types/react-helmet" "^6.1.0" - classnames "^2.2.5" + "@fortawesome/fontawesome-free" "^6.2.1" + "@tippy.js/react" "^3.1.1" + antd "^5.6.1" + classnames "^2.2.6" + core-js "^3.32.1" foundation-sites "^6.4.3" - history "^4.7.2" - moment "^2.20.1" - prop-types "^15.6.0" - react-autocomplete "^1.8.1" + history "^4.10.1" + moment "^2.29.4" + moment-timezone "^0.5.34" + prop-types "^15.8.1" + react-autocomplete "1.8.1" react-form "^2.16.0" react-helmet "^6.1.0" react-router-dom "^4.2.2" - react-toastify "^5.0.1" - rxjs "^6.6.6" - typescript "^4.0.3" - xterm "2.4.0" + react-toastify "9.0.8" + rxjs "^7.8.1" + typescript "^4.9.5" + uuid "^9.0.0" + xterm "^4.19.0" + xterm-addon-fit "^0.5.0" argparse@^1.0.7: version "1.0.10" @@ -2968,6 +3690,19 @@ babel-jest@^26.6.0, babel-jest@^26.6.3: graceful-fs "^4.2.4" slash "^3.0.0" +babel-jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.7.0.tgz#f4369919225b684c56085998ac63dbd05be020d5" + integrity sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg== + dependencies: + "@jest/transform" "^29.7.0" + "@types/babel__core" "^7.1.14" + babel-plugin-istanbul "^6.1.1" + babel-preset-jest "^29.6.3" + chalk "^4.0.0" + graceful-fs "^4.2.9" + slash "^3.0.0" + babel-loader@8.1.0: version "8.1.0" resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.1.0.tgz#c611d5112bd5209abe8b9fa84c3e4da25275f1c3" @@ -2997,6 +3732,17 @@ babel-plugin-istanbul@^6.0.0: istanbul-lib-instrument "^4.0.0" test-exclude "^6.0.0" +babel-plugin-istanbul@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz#fa88ec59232fd9b4e36dbbc540a8ec9a9b47da73" + integrity sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@istanbuljs/load-nyc-config" "^1.0.0" + 
"@istanbuljs/schema" "^0.1.2" + istanbul-lib-instrument "^5.0.4" + test-exclude "^6.0.0" + babel-plugin-jest-hoist@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-26.6.2.tgz#8185bd030348d254c6d7dd974355e6a28b21e62d" @@ -3007,6 +3753,16 @@ babel-plugin-jest-hoist@^26.6.2: "@types/babel__core" "^7.0.0" "@types/babel__traverse" "^7.0.6" +babel-plugin-jest-hoist@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz#aadbe943464182a8922c3c927c3067ff40d24626" + integrity sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg== + dependencies: + "@babel/template" "^7.3.3" + "@babel/types" "^7.3.3" + "@types/babel__core" "^7.1.14" + "@types/babel__traverse" "^7.0.6" + babel-plugin-macros@2.8.0: version "2.8.0" resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.8.0.tgz#0f958a7cc6556b1e65344465d99111a1e5e10138" @@ -3089,6 +3845,14 @@ babel-preset-jest@^26.6.2: babel-plugin-jest-hoist "^26.6.2" babel-preset-current-node-syntax "^1.0.0" +babel-preset-jest@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz#fa05fa510e7d493896d7b0dd2033601c840f171c" + integrity sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA== + dependencies: + babel-plugin-jest-hoist "^29.6.3" + babel-preset-current-node-syntax "^1.0.0" + babel-preset-react-app@^10.0.0: version "10.0.0" resolved "https://registry.yarnpkg.com/babel-preset-react-app/-/babel-preset-react-app-10.0.0.tgz#689b60edc705f8a70ce87f47ab0e560a317d7045" @@ -3262,7 +4026,7 @@ braces@^2.3.1, braces@^2.3.2: split-string "^3.0.2" to-regex "^3.0.1" -braces@^3.0.1, braces@~3.0.2: +braces@^3.0.1, braces@^3.0.2, braces@~3.0.2: version "3.0.2" resolved 
"https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== @@ -3361,6 +4125,23 @@ browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.14.5, browserslist@^4 escalade "^3.1.1" node-releases "^1.1.71" +browserslist@^4.21.9: + version "4.22.1" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.22.1.tgz#ba91958d1a59b87dab6fed8dfbcb3da5e2e9c619" + integrity sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ== + dependencies: + caniuse-lite "^1.0.30001541" + electron-to-chromium "^1.4.535" + node-releases "^2.0.13" + update-browserslist-db "^1.0.13" + +bs-logger@0.x: + version "0.2.6" + resolved "https://registry.yarnpkg.com/bs-logger/-/bs-logger-0.2.6.tgz#eb7d365307a72cf974cc6cda76b68354ad336bd8" + integrity sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog== + dependencies: + fast-json-stable-stringify "2.x" + bser@2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05" @@ -3536,6 +4317,11 @@ caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000981, caniuse-lite@^1.0.30001109, can resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001228.tgz#bfdc5942cd3326fa51ee0b42fbef4da9d492a7fa" integrity sha512-QQmLOGJ3DEgokHbMSA8cj2a+geXqmnpyOFT0lhQV6P3/YOJvGDEwoedcwxEQ30gJIwIIunHIicunJ2rzK5gB2A== +caniuse-lite@^1.0.30001541: + version "1.0.30001565" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001565.tgz#a528b253c8a2d95d2b415e11d8b9942acc100c4f" + integrity sha512-xrE//a3O7TP0vaJ8ikzkD2c2NgcVUvsEe2IvFTntV4Yd1Z9FVzh+gW+enX96L0psrbaFMcVcH2l90xNuGDWc8w== + capture-exit@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4" @@ 
-3570,7 +4356,15 @@ chalk@^3.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" -chalk@^4.0.0, chalk@^4.1.0: +chalk@^4.0.0: + version "4.1.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chalk@^4.1.0: version "4.1.1" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.1.tgz#c80b3fab28bf6371e6863325eee67e618b77e6ad" integrity sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg== @@ -3642,6 +4436,11 @@ ci-info@^2.0.0: resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== +ci-info@^3.2.0: + version "3.9.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" + integrity sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ== + cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" @@ -3660,6 +4459,11 @@ cjs-module-lexer@^0.6.0: resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-0.6.0.tgz#4186fcca0eae175970aee870b9fe2d6cf8d5655f" integrity sha512-uc2Vix1frTfnuzxxu1Hp4ktSvM3QaI4oXl4ZUqL1wjTu/BGki9TrCWoqLTg/drR1KwAEarXuRFCG2Svr1GxPFw== +cjs-module-lexer@^1.0.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz#6c370ab19f8a3394e318fe682686ec0ac684d107" + integrity sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ== + class-utils@^0.3.5: version "0.3.6" resolved 
"https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" @@ -3710,6 +4514,15 @@ cliui@^6.0.0: strip-ansi "^6.0.0" wrap-ansi "^6.2.0" +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + clone-deep@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-4.0.1.tgz#c19fd9bdbbf85942b4fd979c84dcf7d5f07c2387" @@ -3719,6 +4532,11 @@ clone-deep@^4.0.1: kind-of "^6.0.2" shallow-clone "^3.0.0" +clsx@^1.1.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" + integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== + co@^4.6.0: version "4.6.0" resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" @@ -3929,6 +4747,11 @@ convert-source-map@^0.3.3: resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-0.3.5.tgz#f1d802950af7dd2631a1febe0596550c86ab3190" integrity sha1-8dgClQr33SYxof6+BZZVDIarMZA= +convert-source-map@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" + integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== + cookie-signature@1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" @@ -3956,7 +4779,7 @@ copy-descriptor@^0.1.0: resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" integrity 
sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= -copy-to-clipboard@^3.2.0: +copy-to-clipboard@^3.2.0, copy-to-clipboard@^3.3.3: version "3.3.3" resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz#55ac43a1db8ae639a4bd99511c148cdd1b83a1b0" integrity sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA== @@ -3998,6 +4821,11 @@ core-js@^2.4.0: resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.12.tgz#d9333dfa7b065e347cc5682219d6f690859cc2ec" integrity sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ== +core-js@^3.32.1: + version "3.33.3" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.33.3.tgz#3c644a323f0f533a0d360e9191e37f7fc059088d" + integrity sha512-lo0kOocUlLKmm6kv/FswQL8zbkH7mVsLJ/FULClOhv8WRVmKLVcs6XPNQAzstfeJTCHMyButEwG+z1kHxHoDZw== + core-js@^3.6.5: version "3.12.1" resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.12.1.tgz#6b5af4ff55616c08a44d386f1f510917ff204112" @@ -4071,6 +4899,19 @@ create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7: safe-buffer "^5.0.1" sha.js "^2.4.8" +create-jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/create-jest/-/create-jest-29.7.0.tgz#a355c5b3cb1e1af02ba177fe7afd7feee49a5320" + integrity sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q== + dependencies: + "@jest/types" "^29.6.3" + chalk "^4.0.0" + exit "^0.1.2" + graceful-fs "^4.2.9" + jest-config "^29.7.0" + jest-util "^29.7.0" + prompts "^2.0.1" + cross-spawn@7.0.3, cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: version "7.0.3" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" @@ -4343,16 +5184,82 @@ csstype@^3.0.10: resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.2.tgz#1d4bf9d572f11c14031f0436e1c10bc1f571f50b" integrity 
sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ== -csstype@^3.0.2: - version "3.0.8" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.8.tgz#d2266a792729fb227cd216fb572f43728e1ad340" - integrity sha512-jXKhWqXPmlUeoQnF/EhTtTl4C9SnrxSH/jZUih3jmO6lBKr99rP3/+FmrMj4EFpOXzMtXHAZkd3x0E6h6Fgflw== - cyclist@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-1.0.1.tgz#596e9698fd0c80e12038c2b82d6eb1b35b6224d9" integrity sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk= +"d3-array@2 - 3", "d3-array@2.10.0 - 3", d3-array@^3.1.6: + version "3.2.4" + resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" + integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== + dependencies: + internmap "1 - 2" + +"d3-color@1 - 3": + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-color/-/d3-color-3.1.0.tgz#395b2833dfac71507f12ac2f7af23bf819de24e2" + integrity sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA== + +d3-ease@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-ease/-/d3-ease-3.0.1.tgz#9658ac38a2140d59d346160f1f6c30fda0bd12f4" + integrity sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w== + +"d3-format@1 - 3": + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-3.1.0.tgz#9260e23a28ea5cb109e93b21a06e24e2ebd55641" + integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== + +"d3-interpolate@1.2.0 - 3", d3-interpolate@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-interpolate/-/d3-interpolate-3.0.1.tgz#3c47aa5b32c5b3dfb56ef3fd4342078a632b400d" + integrity sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g== + dependencies: + d3-color "1 - 3" + 
+d3-path@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" + integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ== + +d3-scale@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-4.0.2.tgz#82b38e8e8ff7080764f8dcec77bd4be393689396" + integrity sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ== + dependencies: + d3-array "2.10.0 - 3" + d3-format "1 - 3" + d3-interpolate "1.2.0 - 3" + d3-time "2.1.1 - 3" + d3-time-format "2 - 4" + +d3-shape@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-3.2.0.tgz#a1a839cbd9ba45f28674c69d7f855bcf91dfc6a5" + integrity sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA== + dependencies: + d3-path "^3.1.0" + +"d3-time-format@2 - 4": + version "4.1.0" + resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-4.1.0.tgz#7ab5257a5041d11ecb4fe70a5c7d16a195bb408a" + integrity sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg== + dependencies: + d3-time "1 - 3" + +"d3-time@1 - 3", "d3-time@2.1.1 - 3", d3-time@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-time/-/d3-time-3.1.0.tgz#9310db56e992e3c0175e1ef385e545e48a9bb5c7" + integrity sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q== + dependencies: + d3-array "2 - 3" + +d3-timer@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-timer/-/d3-timer-3.0.1.tgz#6284d2a2708285b1abb7e201eda4380af35e63b0" + integrity sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA== + d@1, d@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/d/-/d-1.0.1.tgz#8698095372d58dbee346ffd0c7093f99f8f9eb5a" @@ -4413,6 +5320,11 @@ 
decamelize@^1.2.0: resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= +decimal.js-light@^2.4.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/decimal.js-light/-/decimal.js-light-2.5.1.tgz#134fd32508f19e208f4fb2f8dac0d2626a867934" + integrity sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg== + decimal.js@^10.2.1: version "10.2.1" resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.2.1.tgz#238ae7b0f0c793d3e3cea410108b35a2c01426a3" @@ -4428,6 +5340,11 @@ dedent@^0.7.0: resolved "https://registry.yarnpkg.com/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c" integrity sha1-JJXduvbrh0q7Dhvp3yLS5aVEMmw= +dedent@^1.0.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" + integrity sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg== + deep-diff@^0.3.5: version "0.3.8" resolved "https://registry.yarnpkg.com/deep-diff/-/deep-diff-0.3.8.tgz#c01de63efb0eec9798801d40c7e0dae25b582c84" @@ -4551,6 +5468,11 @@ diff-sequences@^26.6.2: resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-26.6.2.tgz#48ba99157de1923412eed41db6b6d4aa9ca7c0b1" integrity sha512-Mv/TDa3nZ9sbc5soK+OoA74BsS3mL37yixCvUAQkiuA4Wz6YtwP/K47n2rv2ovzHZvoiQeA5FTQOschKkEwB0Q== +diff-sequences@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.6.3.tgz#4deaf894d11407c51efc8418012f9e70b84ea921" + integrity sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q== + diffie-hellman@^5.0.0: version "5.0.3" resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" @@ -4626,13 +5548,12 @@ dom-converter@^0.2: dependencies: utila "~0.4" 
-dom-helpers@^5.0.1: - version "5.2.1" - resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-5.2.1.tgz#d9400536b2bf8225ad98fe052e029451ac40e902" - integrity sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA== +dom-helpers@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-3.4.0.tgz#e9b369700f959f62ecde5a6babde4bccd9169af8" + integrity sha512-LnuPJ+dwqKDIyotW1VzmOZ5TONUN7CwkCR5hrgawTUbkBGYdeoNLZo6nNfGkCrjtE1nXXaj7iMMpDa8/d9WoIA== dependencies: - "@babel/runtime" "^7.8.7" - csstype "^3.0.2" + "@babel/runtime" "^7.1.2" dom-scroll-into-view@1.0.1: version "1.0.1" @@ -4752,6 +5673,11 @@ electron-to-chromium@^1.3.564, electron-to-chromium@^1.3.723: resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.727.tgz#857e310ca00f0b75da4e1db6ff0e073cc4a91ddf" integrity sha512-Mfz4FIB4FSvEwBpDfdipRIrwd6uo8gUDoRDF4QEYb4h4tSuI3ov594OrjU6on042UlFHouIJpClDODGkPcBSbg== +electron-to-chromium@^1.4.535: + version "1.4.596" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.596.tgz#6752d1aa795d942d49dfc5d3764d6ea283fab1d7" + integrity sha512-zW3zbZ40Icb2BCWjm47nxwcFGYlIgdXkAx85XDO7cyky9J4QQfq8t0W19/TLZqq3JPQXtlv8BPIGmfa9Jb4scg== + elliptic@^6.5.3: version "6.5.4" resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.4.tgz#da37cebd31e79a1367e941b592ed1fbebd58abbb" @@ -4765,6 +5691,11 @@ elliptic@^6.5.3: minimalistic-assert "^1.0.1" minimalistic-crypto-utils "^1.0.1" +emittery@^0.13.1: + version "0.13.1" + resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad" + integrity sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ== + emittery@^0.7.1: version "0.7.2" resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.7.2.tgz#25595908e13af0f5674ab419396e2fb394cdfa82" @@ -5212,7 +6143,7 @@ etag@~1.8.1: resolved 
"https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc= -eventemitter3@^4.0.0: +eventemitter3@^4.0.0, eventemitter3@^4.0.1: version "4.0.7" resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== @@ -5315,6 +6246,17 @@ expect@^26.6.0, expect@^26.6.2: jest-message-util "^26.6.2" jest-regex-util "^26.0.0" +expect@^29.0.0, expect@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/expect/-/expect-29.7.0.tgz#578874590dcb3214514084c08115d8aee61e11bc" + integrity sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw== + dependencies: + "@jest/expect-utils" "^29.7.0" + jest-get-type "^29.6.3" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + express@^4.17.1: version "4.17.1" resolved "https://registry.yarnpkg.com/express/-/express-4.17.1.tgz#4491fc38605cf51f8629d39c2b5d026f98a4c134" @@ -5407,6 +6349,11 @@ fast-deep-equal@^3.1.1: resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== +fast-equals@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/fast-equals/-/fast-equals-5.0.1.tgz#a4eefe3c5d1c0d021aeed0bc10ba5e0c12ee405d" + integrity sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ== + fast-glob@^3.1.1, fast-glob@^3.2.4: version "3.2.5" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.5.tgz#7939af2a656de79a4f1901903ee8adcaa7cb9661" @@ -5419,7 +6366,7 @@ fast-glob@^3.1.1, fast-glob@^3.2.4: micromatch "^4.0.2" picomatch "^2.2.1" -fast-json-stable-stringify@^2.0.0, 
fast-json-stable-stringify@^2.1.0: +fast-json-stable-stringify@2.x, fast-json-stable-stringify@^2.0.0, fast-json-stable-stringify@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== @@ -5582,9 +6529,9 @@ flush-write-stream@^1.0.0: readable-stream "^2.3.6" follow-redirects@^1.0.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.1.tgz#d9114ded0a1cfdd334e164e6662ad02bfd91ff43" - integrity sha512-HWqDgT7ZEkqRzBvc2s64vSZ/hfOceEol3ac/7tKwzuvEyWx3/4UegXh5oBOIotkGsObyk3xznnSRVADBgWSQVg== + version "1.15.5" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.5.tgz#54d4d6d062c0fa7d9d17feb008461550e3ba8020" + integrity sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw== for-in@^1.0.2: version "1.0.2" @@ -5711,11 +6658,21 @@ fsevents@^2.1.2, fsevents@^2.1.3, fsevents@~2.3.1: resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== +fsevents@^2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== + function-bind@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== +function-bind@^1.1.2: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== + functional-red-black-tree@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" @@ -5726,7 +6683,7 @@ gensync@^1.0.0-beta.1, gensync@^1.0.0-beta.2: resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== -get-caller-file@^2.0.1: +get-caller-file@^2.0.1, get-caller-file@^2.0.5: version "2.0.5" resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== @@ -5891,6 +6848,11 @@ graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6 resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee" integrity sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ== +graceful-fs@^4.2.9: + version "4.2.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== + growly@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081" @@ -6009,6 +6971,13 @@ hash.js@^1.0.0, hash.js@^1.0.3: inherits "^2.0.3" minimalistic-assert "^1.0.1" +hasown@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" + integrity sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA== + dependencies: + function-bind "^1.1.2" + he@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" @@ -6019,7 +6988,7 @@ hex-color-regex@^1.1.0: resolved "https://registry.yarnpkg.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e" integrity sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ== -history@^4.7.2, history@^4.9.0: +history@^4.10.1, history@^4.7.2, history@^4.9.0: version "4.10.1" resolved "https://registry.yarnpkg.com/history/-/history-4.10.1.tgz#33371a65e3a83b267434e2b3f3b1b4c58aad4cf3" integrity sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew== @@ -6388,6 +7357,11 @@ internal-slot@^1.0.3: has "^1.0.3" side-channel "^1.0.4" +"internmap@1 - 2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" + integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== + interpret@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/interpret/-/interpret-2.2.0.tgz#1a78a0b5965c40a5416d007ad6f50ad27c417df9" @@ -6518,6 +7492,13 @@ is-core-module@^2.0.0, is-core-module@^2.2.0: dependencies: has "^1.0.3" +is-core-module@^2.13.0: + version "2.13.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" + integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== + dependencies: + hasown "^2.0.0" + is-data-descriptor@^0.1.4: version "0.1.4" resolved 
"https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" @@ -6796,6 +7777,11 @@ istanbul-lib-coverage@^3.0.0: resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz#f5944a37c70b550b02a78a5c3b2055b280cec8ec" integrity sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg== +istanbul-lib-coverage@^3.2.0: + version "3.2.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz#2d166c4b0644d43a39f04bf6c2edd1e585f31756" + integrity sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg== + istanbul-lib-instrument@^4.0.0, istanbul-lib-instrument@^4.0.3: version "4.0.3" resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz#873c6fff897450118222774696a3f28902d77c1d" @@ -6806,6 +7792,28 @@ istanbul-lib-instrument@^4.0.0, istanbul-lib-instrument@^4.0.3: istanbul-lib-coverage "^3.0.0" semver "^6.3.0" +istanbul-lib-instrument@^5.0.4: + version "5.2.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz#d10c8885c2125574e1c231cacadf955675e1ce3d" + integrity sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg== + dependencies: + "@babel/core" "^7.12.3" + "@babel/parser" "^7.14.7" + "@istanbuljs/schema" "^0.1.2" + istanbul-lib-coverage "^3.2.0" + semver "^6.3.0" + +istanbul-lib-instrument@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.1.tgz#71e87707e8041428732518c6fb5211761753fbdf" + integrity sha512-EAMEJBsYuyyztxMxW3g7ugGPkrZsV57v0Hmv3mm1uQsmB+QnZuepg731CRaIgeUVSdmsTngOkSnauNF8p7FIhA== + dependencies: + "@babel/core" "^7.12.3" + "@babel/parser" "^7.14.7" + "@istanbuljs/schema" "^0.1.2" + istanbul-lib-coverage "^3.2.0" + semver "^7.5.4" + 
istanbul-lib-report@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#7518fe52ea44de372f460a76b5ecda9ffb73d8a6" @@ -6832,6 +7840,14 @@ istanbul-reports@^3.0.2: html-escaper "^2.0.0" istanbul-lib-report "^3.0.0" +istanbul-reports@^3.1.3: + version "3.1.6" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.6.tgz#2544bcab4768154281a2f0870471902704ccaa1a" + integrity sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg== + dependencies: + html-escaper "^2.0.0" + istanbul-lib-report "^3.0.0" + jest-changed-files@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-26.6.2.tgz#f6198479e1cc66f22f9ae1e22acaa0b429c042d0" @@ -6841,6 +7857,15 @@ jest-changed-files@^26.6.2: execa "^4.0.0" throat "^5.0.0" +jest-changed-files@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" + integrity sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w== + dependencies: + execa "^5.0.0" + jest-util "^29.7.0" + p-limit "^3.1.0" + jest-circus@26.6.0: version "26.6.0" resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-26.6.0.tgz#7d9647b2e7f921181869faae1f90a2629fd70705" @@ -6868,6 +7893,32 @@ jest-circus@26.6.0: stack-utils "^2.0.2" throat "^5.0.0" +jest-circus@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.7.0.tgz#b6817a45fcc835d8b16d5962d0c026473ee3668a" + integrity sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/expect" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + co "^4.6.0" + dedent "^1.0.0" + is-generator-fn "^2.0.0" + jest-each 
"^29.7.0" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-runtime "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + p-limit "^3.1.0" + pretty-format "^29.7.0" + pure-rand "^6.0.0" + slash "^3.0.0" + stack-utils "^2.0.3" + jest-cli@^26.6.0: version "26.6.3" resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-26.6.3.tgz#43117cfef24bc4cd691a174a8796a532e135e92a" @@ -6887,6 +7938,23 @@ jest-cli@^26.6.0: prompts "^2.0.1" yargs "^15.4.1" +jest-cli@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.7.0.tgz#5592c940798e0cae677eec169264f2d839a37995" + integrity sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg== + dependencies: + "@jest/core" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" + chalk "^4.0.0" + create-jest "^29.7.0" + exit "^0.1.2" + import-local "^3.0.2" + jest-config "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + yargs "^17.3.1" + jest-config@^26.6.3: version "26.6.3" resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-26.6.3.tgz#64f41444eef9eb03dc51d5c53b75c8c71f645349" @@ -6911,6 +7979,34 @@ jest-config@^26.6.3: micromatch "^4.0.2" pretty-format "^26.6.2" +jest-config@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.7.0.tgz#bcbda8806dbcc01b1e316a46bb74085a84b0245f" + integrity sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ== + dependencies: + "@babel/core" "^7.11.6" + "@jest/test-sequencer" "^29.7.0" + "@jest/types" "^29.6.3" + babel-jest "^29.7.0" + chalk "^4.0.0" + ci-info "^3.2.0" + deepmerge "^4.2.2" + glob "^7.1.3" + graceful-fs "^4.2.9" + jest-circus "^29.7.0" + jest-environment-node "^29.7.0" + jest-get-type "^29.6.3" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-runner "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + micromatch "^4.0.4" + parse-json "^5.2.0" + 
pretty-format "^29.7.0" + slash "^3.0.0" + strip-json-comments "^3.1.1" + jest-diff@^26.0.0, jest-diff@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-26.6.2.tgz#1aa7468b52c3a68d7d5c5fdcdfcd5e49bd164394" @@ -6921,6 +8017,16 @@ jest-diff@^26.0.0, jest-diff@^26.6.2: jest-get-type "^26.3.0" pretty-format "^26.6.2" +jest-diff@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.7.0.tgz#017934a66ebb7ecf6f205e84699be10afd70458a" + integrity sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw== + dependencies: + chalk "^4.0.0" + diff-sequences "^29.6.3" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" + jest-docblock@^26.0.0: version "26.0.0" resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-26.0.0.tgz#3e2fa20899fc928cb13bd0ff68bd3711a36889b5" @@ -6928,6 +8034,13 @@ jest-docblock@^26.0.0: dependencies: detect-newline "^3.0.0" +jest-docblock@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.7.0.tgz#8fddb6adc3cdc955c93e2a87f61cfd350d5d119a" + integrity sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g== + dependencies: + detect-newline "^3.0.0" + jest-each@^26.6.0, jest-each@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-26.6.2.tgz#02526438a77a67401c8a6382dfe5999952c167cb" @@ -6939,6 +8052,17 @@ jest-each@^26.6.0, jest-each@^26.6.2: jest-util "^26.6.2" pretty-format "^26.6.2" +jest-each@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.7.0.tgz#162a9b3f2328bdd991beaabffbb74745e56577d1" + integrity sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ== + dependencies: + "@jest/types" "^29.6.3" + chalk "^4.0.0" + jest-get-type "^29.6.3" + jest-util "^29.7.0" + pretty-format "^29.7.0" + jest-environment-jsdom@^26.6.2: 
version "26.6.2" resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-26.6.2.tgz#78d09fe9cf019a357009b9b7e1f101d23bd1da3e" @@ -6964,11 +8088,28 @@ jest-environment-node@^26.6.2: jest-mock "^26.6.2" jest-util "^26.6.2" +jest-environment-node@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" + integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + jest-mock "^29.7.0" + jest-util "^29.7.0" + jest-get-type@^26.3.0: version "26.3.0" resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-26.3.0.tgz#e97dc3c3f53c2b406ca7afaed4493b1d099199e0" integrity sha512-TpfaviN1R2pQWkIihlfEanwOXK0zcxrKEE4MlU6Tn7keoXdN6/3gK/xl0yEh8DOunn5pOVGKf8hB4R9gVh04ig== +jest-get-type@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.6.3.tgz#36f499fdcea197c1045a127319c0481723908fd1" + integrity sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw== + jest-haste-map@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-26.6.2.tgz#dd7e60fe7dc0e9f911a23d79c5ff7fb5c2cafeaa" @@ -6990,6 +8131,25 @@ jest-haste-map@^26.6.2: optionalDependencies: fsevents "^2.1.2" +jest-haste-map@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.7.0.tgz#3c2396524482f5a0506376e6c858c3bbcc17b104" + integrity sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA== + dependencies: + "@jest/types" "^29.6.3" + "@types/graceful-fs" "^4.1.3" + "@types/node" "*" + anymatch "^3.0.3" + fb-watchman "^2.0.0" + graceful-fs "^4.2.9" + jest-regex-util "^29.6.3" + jest-util 
"^29.7.0" + jest-worker "^29.7.0" + micromatch "^4.0.4" + walker "^1.0.8" + optionalDependencies: + fsevents "^2.3.2" + jest-jasmine2@^26.6.3: version "26.6.3" resolved "https://registry.yarnpkg.com/jest-jasmine2/-/jest-jasmine2-26.6.3.tgz#adc3cf915deacb5212c93b9f3547cd12958f2edd" @@ -7022,6 +8182,14 @@ jest-leak-detector@^26.6.2: jest-get-type "^26.3.0" pretty-format "^26.6.2" +jest-leak-detector@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz#5b7ec0dadfdfec0ca383dc9aa016d36b5ea4c728" + integrity sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw== + dependencies: + jest-get-type "^29.6.3" + pretty-format "^29.7.0" + jest-matcher-utils@^26.6.0, jest-matcher-utils@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-26.6.2.tgz#8e6fd6e863c8b2d31ac6472eeb237bc595e53e7a" @@ -7032,6 +8200,16 @@ jest-matcher-utils@^26.6.0, jest-matcher-utils@^26.6.2: jest-get-type "^26.3.0" pretty-format "^26.6.2" +jest-matcher-utils@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz#ae8fec79ff249fd592ce80e3ee474e83a6c44f12" + integrity sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g== + dependencies: + chalk "^4.0.0" + jest-diff "^29.7.0" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" + jest-message-util@^26.6.0, jest-message-util@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-26.6.2.tgz#58173744ad6fc0506b5d21150b9be56ef001ca07" @@ -7047,6 +8225,21 @@ jest-message-util@^26.6.0, jest-message-util@^26.6.2: slash "^3.0.0" stack-utils "^2.0.2" +jest-message-util@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.7.0.tgz#8bc392e204e95dfe7564abbe72a404e28e51f7f3" + integrity 
sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w== + dependencies: + "@babel/code-frame" "^7.12.13" + "@jest/types" "^29.6.3" + "@types/stack-utils" "^2.0.0" + chalk "^4.0.0" + graceful-fs "^4.2.9" + micromatch "^4.0.4" + pretty-format "^29.7.0" + slash "^3.0.0" + stack-utils "^2.0.3" + jest-mock@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-26.6.2.tgz#d6cb712b041ed47fe0d9b6fc3474bc6543feb302" @@ -7055,6 +8248,15 @@ jest-mock@^26.6.2: "@jest/types" "^26.6.2" "@types/node" "*" +jest-mock@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.7.0.tgz#4e836cf60e99c6fcfabe9f99d017f3fdd50a6347" + integrity sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw== + dependencies: + "@jest/types" "^29.6.3" + "@types/node" "*" + jest-util "^29.7.0" + jest-pnp-resolver@^1.2.2: version "1.2.2" resolved "https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.2.tgz#b704ac0ae028a89108a4d040b3f919dfddc8e33c" @@ -7065,6 +8267,11 @@ jest-regex-util@^26.0.0: resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-26.0.0.tgz#d25e7184b36e39fd466c3bc41be0971e821fee28" integrity sha512-Gv3ZIs/nA48/Zvjrl34bf+oD76JHiGDUxNOVgUjh3j890sblXryjY4rss71fPtD/njchl6PSE2hIhvyWa1eT0A== +jest-regex-util@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.6.3.tgz#4a556d9c776af68e1c5f48194f4d0327d24e8a52" + integrity sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg== + jest-resolve-dependencies@^26.6.3: version "26.6.3" resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-26.6.3.tgz#6680859ee5d22ee5dcd961fe4871f59f4c784fb6" @@ -7074,6 +8281,14 @@ jest-resolve-dependencies@^26.6.3: jest-regex-util "^26.0.0" jest-snapshot "^26.6.2" +jest-resolve-dependencies@^29.7.0: + 
version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz#1b04f2c095f37fc776ff40803dc92921b1e88428" + integrity sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA== + dependencies: + jest-regex-util "^29.6.3" + jest-snapshot "^29.7.0" + jest-resolve@26.6.0: version "26.6.0" resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-26.6.0.tgz#070fe7159af87b03e50f52ea5e17ee95bbee40e1" @@ -7102,6 +8317,21 @@ jest-resolve@^26.6.2: resolve "^1.18.1" slash "^3.0.0" +jest-resolve@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.7.0.tgz#64d6a8992dd26f635ab0c01e5eef4399c6bcbc30" + integrity sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA== + dependencies: + chalk "^4.0.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-pnp-resolver "^1.2.2" + jest-util "^29.7.0" + jest-validate "^29.7.0" + resolve "^1.20.0" + resolve.exports "^2.0.0" + slash "^3.0.0" + jest-runner@^26.6.0, jest-runner@^26.6.3: version "26.6.3" resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-26.6.3.tgz#2d1fed3d46e10f233fd1dbd3bfaa3fe8924be159" @@ -7128,6 +8358,33 @@ jest-runner@^26.6.0, jest-runner@^26.6.3: source-map-support "^0.5.6" throat "^5.0.0" +jest-runner@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.7.0.tgz#809af072d408a53dcfd2e849a4c976d3132f718e" + integrity sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ== + dependencies: + "@jest/console" "^29.7.0" + "@jest/environment" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + emittery "^0.13.1" + graceful-fs "^4.2.9" + jest-docblock "^29.7.0" + jest-environment-node "^29.7.0" + jest-haste-map "^29.7.0" + jest-leak-detector "^29.7.0" + 
jest-message-util "^29.7.0" + jest-resolve "^29.7.0" + jest-runtime "^29.7.0" + jest-util "^29.7.0" + jest-watcher "^29.7.0" + jest-worker "^29.7.0" + p-limit "^3.1.0" + source-map-support "0.5.13" + jest-runtime@^26.6.0, jest-runtime@^26.6.3: version "26.6.3" resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-26.6.3.tgz#4f64efbcfac398331b74b4b3c82d27d401b8fa2b" @@ -7161,6 +8418,34 @@ jest-runtime@^26.6.0, jest-runtime@^26.6.3: strip-bom "^4.0.0" yargs "^15.4.1" +jest-runtime@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.7.0.tgz#efecb3141cf7d3767a3a0cc8f7c9990587d3d817" + integrity sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/globals" "^29.7.0" + "@jest/source-map" "^29.6.3" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + cjs-module-lexer "^1.0.0" + collect-v8-coverage "^1.0.0" + glob "^7.1.3" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-message-util "^29.7.0" + jest-mock "^29.7.0" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + slash "^3.0.0" + strip-bom "^4.0.0" + jest-serializer@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-serializer/-/jest-serializer-26.6.2.tgz#d139aafd46957d3a448f3a6cdabe2919ba0742d1" @@ -7191,6 +8476,32 @@ jest-snapshot@^26.6.0, jest-snapshot@^26.6.2: pretty-format "^26.6.2" semver "^7.3.2" +jest-snapshot@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.7.0.tgz#c2c574c3f51865da1bb329036778a69bf88a6be5" + integrity sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw== + dependencies: + "@babel/core" "^7.11.6" + "@babel/generator" "^7.7.2" + "@babel/plugin-syntax-jsx" "^7.7.2" 
+ "@babel/plugin-syntax-typescript" "^7.7.2" + "@babel/types" "^7.3.3" + "@jest/expect-utils" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + babel-preset-current-node-syntax "^1.0.0" + chalk "^4.0.0" + expect "^29.7.0" + graceful-fs "^4.2.9" + jest-diff "^29.7.0" + jest-get-type "^29.6.3" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + natural-compare "^1.4.0" + pretty-format "^29.7.0" + semver "^7.5.3" + jest-util@^26.6.0, jest-util@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-26.6.2.tgz#907535dbe4d5a6cb4c47ac9b926f6af29576cbc1" @@ -7203,6 +8514,18 @@ jest-util@^26.6.0, jest-util@^26.6.2: is-ci "^2.0.0" micromatch "^4.0.2" +jest-util@^29.0.0, jest-util@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.7.0.tgz#23c2b62bfb22be82b44de98055802ff3710fc0bc" + integrity sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA== + dependencies: + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + ci-info "^3.2.0" + graceful-fs "^4.2.9" + picomatch "^2.2.3" + jest-validate@^26.6.2: version "26.6.2" resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-26.6.2.tgz#23d380971587150467342911c3d7b4ac57ab20ec" @@ -7215,6 +8538,18 @@ jest-validate@^26.6.2: leven "^3.1.0" pretty-format "^26.6.2" +jest-validate@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.7.0.tgz#7bf705511c64da591d46b15fce41400d52147d9c" + integrity sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw== + dependencies: + "@jest/types" "^29.6.3" + camelcase "^6.2.0" + chalk "^4.0.0" + jest-get-type "^29.6.3" + leven "^3.1.0" + pretty-format "^29.7.0" + jest-watch-typeahead@0.6.1: version "0.6.1" resolved 
"https://registry.yarnpkg.com/jest-watch-typeahead/-/jest-watch-typeahead-0.6.1.tgz#45221b86bb6710b7e97baaa1640ae24a07785e63" @@ -7241,6 +8576,20 @@ jest-watcher@^26.3.0, jest-watcher@^26.6.2: jest-util "^26.6.2" string-length "^4.0.1" +jest-watcher@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.7.0.tgz#7810d30d619c3a62093223ce6bb359ca1b28a2f2" + integrity sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g== + dependencies: + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + ansi-escapes "^4.2.1" + chalk "^4.0.0" + emittery "^0.13.1" + jest-util "^29.7.0" + string-length "^4.0.1" + jest-worker@^24.9.0: version "24.9.0" resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-24.9.0.tgz#5dbfdb5b2d322e98567898238a9697bcce67b3e5" @@ -7258,6 +8607,16 @@ jest-worker@^26.5.0, jest-worker@^26.6.2: merge-stream "^2.0.0" supports-color "^7.0.0" +jest-worker@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.7.0.tgz#acad073acbbaeb7262bd5389e1bcf43e10058d4a" + integrity sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw== + dependencies: + "@types/node" "*" + jest-util "^29.7.0" + merge-stream "^2.0.0" + supports-color "^8.0.0" + jest@26.6.0: version "26.6.0" resolved "https://registry.yarnpkg.com/jest/-/jest-26.6.0.tgz#546b25a1d8c888569dbbe93cae131748086a4a25" @@ -7267,6 +8626,16 @@ jest@26.6.0: import-local "^3.0.2" jest-cli "^26.6.0" +jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest/-/jest-29.7.0.tgz#994676fc24177f088f1c5e3737f5697204ff2613" + integrity sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw== + dependencies: + "@jest/core" "^29.7.0" + "@jest/types" "^29.6.3" + import-local "^3.0.2" + jest-cli "^29.7.0" + "js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: version "4.0.0" 
resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" @@ -7388,6 +8757,11 @@ json5@^2.1.2: dependencies: minimist "^1.2.5" +json5@^2.2.3: + version "2.2.3" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== + jsonfile@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" @@ -7592,7 +8966,7 @@ lodash.debounce@^4.0.8: resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168= -lodash.memoize@^4.1.2: +lodash.memoize@4.x, lodash.memoize@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" integrity sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4= @@ -7687,6 +9061,18 @@ make-dir@^3.0.0, make-dir@^3.0.2: dependencies: semver "^6.0.0" +make-error@1.x: + version "1.3.6" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" + integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== + +makeerror@1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.12.tgz#3e5dd2079a82e812e983cc6610c4a2cb0eaa801a" + integrity sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg== + dependencies: + tmpl "1.0.5" + makeerror@1.0.x: version "1.0.11" resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c" @@ -7798,6 +9184,14 @@ micromatch@^4.0.0, micromatch@^4.0.2: braces "^3.0.1" picomatch "^2.2.3" +micromatch@^4.0.4: + version "4.0.5" + resolved 
"https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + dependencies: + braces "^3.0.2" + picomatch "^2.3.1" + miller-rabin@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" @@ -7973,7 +9367,14 @@ moment-timezone@^0.5.33: dependencies: moment ">= 2.9.0" -moment@2.29.4, "moment@>= 2.9.0", moment@^2.20.1, moment@^2.29.4: +moment-timezone@^0.5.34: + version "0.5.43" + resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.43.tgz#3dd7f3d0c67f78c23cd1906b9b2137a09b3c4790" + integrity sha512-72j3aNyuIsDxdF1i7CEgV2FfxM1r6aaqJyLB2vwb33mXYyoyLly+F1zbWqhA3/bVIoJ4szlUoMbUnVdid32NUQ== + dependencies: + moment "^2.29.4" + +moment@2.29.4, "moment@>= 2.9.0", moment@^2.29.4: version "2.29.4" resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.4.tgz#3dbe052889fe7c1b2ed966fcb3a77328964ef108" integrity sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w== @@ -8166,6 +9567,11 @@ node-releases@^1.1.61, node-releases@^1.1.71: resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.71.tgz#cb1334b179896b1c89ecfdd4b725fb7bbdfc7dbb" integrity sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg== +node-releases@^2.0.13: + version "2.0.13" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d" + integrity sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ== + normalize-package-data@^2.3.2, normalize-package-data@^2.5.0: version "2.5.0" resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" @@ -8458,7 +9864,7 @@ 
p-limit@^2.0.0, p-limit@^2.2.0: dependencies: p-try "^2.0.0" -p-limit@^3.0.2: +p-limit@^3.0.2, p-limit@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== @@ -8570,7 +9976,7 @@ parse-json@^4.0.0: error-ex "^1.3.1" json-parse-better-errors "^1.0.1" -parse-json@^5.0.0: +parse-json@^5.0.0, parse-json@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== @@ -8648,6 +10054,11 @@ path-parse@^1.0.6: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" @@ -8688,11 +10099,21 @@ performance-now@^2.1.0: resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== + picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.2, picomatch@^2.2.3: version "2.2.3" 
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.3.tgz#465547f359ccc206d3c48e46a1bcb89bf7ee619d" integrity sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg== +picomatch@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + pify@^2.0.0: version "2.3.0" resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" @@ -8722,6 +10143,11 @@ pirates@^4.0.1: dependencies: node-modules-regexp "^1.0.0" +pirates@^4.0.4: + version "4.0.6" + resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.6.tgz#3018ae32ecfcff6c29ba2267cbf21166ac1f36b9" + integrity sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg== + pkg-dir@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-2.0.0.tgz#f6d5d1109e19d63edf428e0bd57e12777615334b" @@ -8757,7 +10183,7 @@ pnp-webpack-plugin@1.6.4: dependencies: ts-pnp "^1.1.6" -popper.js@^1.14.7: +popper.js@^1.16.0: version "1.16.1" resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.16.1.tgz#2a223cb3dc7b6213d740e40372be40de43e65b1b" integrity sha512-Wb4p1J4zyFTbM+u6WuO4XstYx4Ky9Cewe4DWrel7B0w6VVICvPwdOpotjzcf6eD8TsckVnIMNONQyPIUFOUbCQ== @@ -9489,6 +10915,15 @@ pretty-format@^26.0.0, pretty-format@^26.6.0, pretty-format@^26.6.2: ansi-styles "^4.0.0" react-is "^17.0.1" +pretty-format@^29.0.0, pretty-format@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.7.0.tgz#ca42c758310f365bfa71a0bda0a807160b776812" + integrity sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ== + dependencies: + "@jest/schemas" "^29.6.3" + ansi-styles "^5.0.0" + react-is "^18.0.0" + process-nextick-args@~2.0.0: 
version "2.0.1" resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" @@ -9532,7 +10967,7 @@ prompts@^2.0.1: kleur "^3.0.3" sisteransi "^1.0.5" -prop-types@^15.5.10, prop-types@^15.5.4, prop-types@^15.6.0, prop-types@^15.6.1, prop-types@^15.6.2, prop-types@^15.7.2: +prop-types@^15.5.10, prop-types@^15.5.4, prop-types@^15.6.1, prop-types@^15.6.2, prop-types@^15.7.2: version "15.7.2" resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ== @@ -9620,6 +11055,11 @@ punycode@^2.1.0, punycode@^2.1.1: resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== +pure-rand@^6.0.0: + version "6.0.4" + resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.4.tgz#50b737f6a925468679bff00ad20eade53f37d5c7" + integrity sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA== + q@^1.1.2: version "1.5.1" resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" @@ -9741,6 +11181,18 @@ rc-cascader@~3.10.0: rc-tree "~5.7.0" rc-util "^5.6.1" +rc-cascader@~3.20.0: + version "3.20.0" + resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-3.20.0.tgz#b270f9d84ed83417ee7309ef5e56e415f1586076" + integrity sha512-lkT9EEwOcYdjZ/jvhLoXGzprK1sijT3/Tp4BLxQQcHDZkkOzzwYQC9HgmKoJz0K7CukMfgvO9KqHeBdgE+pELw== + dependencies: + "@babel/runtime" "^7.12.5" + array-tree-filter "^2.1.0" + classnames "^2.3.1" + rc-select "~14.10.0" + rc-tree "~5.8.1" + rc-util "^5.37.0" + rc-checkbox@~3.0.0: version "3.0.0" resolved 
"https://registry.yarnpkg.com/rc-checkbox/-/rc-checkbox-3.0.0.tgz#6b426d16c7d2ed9fee219a1dfb14d2c504a45300" @@ -9750,6 +11202,15 @@ rc-checkbox@~3.0.0: classnames "^2.3.2" rc-util "^5.25.2" +rc-checkbox@~3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/rc-checkbox/-/rc-checkbox-3.1.0.tgz#6be0d9d8de2cc96fb5e37f9036a1c3e360d0a42d" + integrity sha512-PAwpJFnBa3Ei+5pyqMMXdcKYKNBMS+TvSDiLdDnARnMJHC8ESxwPfm4Ao1gJiKtWLdmGfigascnCpwrHFgoOBQ== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.3.2" + rc-util "^5.25.2" + rc-collapse@~3.5.2: version "3.5.2" resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-3.5.2.tgz#abb7d144ad55bd9cbd201fa95bc5b271da2aa7c3" @@ -9760,6 +11221,16 @@ rc-collapse@~3.5.2: rc-motion "^2.3.4" rc-util "^5.27.0" +rc-collapse@~3.7.1: + version "3.7.2" + resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-3.7.2.tgz#d11538ff9c705a5c988d9a4dfcc051a919692fe3" + integrity sha512-ZRw6ipDyOnfLFySxAiCMdbHtb5ePAsB9mT17PA6y1mRD/W6KHRaZeb5qK/X9xDV1CqgyxMpzw0VdS74PCcUk4A== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.3.4" + rc-util "^5.27.0" + rc-dialog@~9.1.0: version "9.1.0" resolved "https://registry.yarnpkg.com/rc-dialog/-/rc-dialog-9.1.0.tgz#6bf6fcc0453503b7643e54a5a445e835e3850649" @@ -9771,6 +11242,17 @@ rc-dialog@~9.1.0: rc-motion "^2.3.0" rc-util "^5.21.0" +rc-dialog@~9.3.4: + version "9.3.4" + resolved "https://registry.yarnpkg.com/rc-dialog/-/rc-dialog-9.3.4.tgz#e0decb3d4a0dbe36524a67ed2f8fe2daa4b7b73c" + integrity sha512-975X3018GhR+EjZFbxA2Z57SX5rnu0G0/OxFgMMvZK4/hQWEm3MHaNvP4wXpxYDoJsp+xUvVW+GB9CMMCm81jA== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/portal" "^1.0.0-8" + classnames "^2.2.6" + rc-motion "^2.3.0" + rc-util "^5.21.0" + rc-drawer@~6.1.1: version "6.1.5" resolved "https://registry.yarnpkg.com/rc-drawer/-/rc-drawer-6.1.5.tgz#c4137b944c16b7c179d0dba6f06ebe54f9311ec8" @@ -9782,6 +11264,17 @@ rc-drawer@~6.1.1: rc-motion "^2.6.1" rc-util 
"^5.21.2" +rc-drawer@~6.5.2: + version "6.5.2" + resolved "https://registry.yarnpkg.com/rc-drawer/-/rc-drawer-6.5.2.tgz#49c1f279261992f6d4653d32a03b14acd436d610" + integrity sha512-QckxAnQNdhh4vtmKN0ZwDf3iakO83W9eZcSKWYYTDv4qcD2fHhRAZJJ/OE6v2ZlQ2kSqCJX5gYssF4HJFvsEPQ== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/portal" "^1.1.1" + classnames "^2.2.6" + rc-motion "^2.6.1" + rc-util "^5.36.0" + rc-dropdown@~4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/rc-dropdown/-/rc-dropdown-4.0.1.tgz#f65d9d3d89750241057db59d5a75e43cd4576b68" @@ -9789,7 +11282,17 @@ rc-dropdown@~4.0.0: dependencies: "@babel/runtime" "^7.18.3" classnames "^2.2.6" - rc-trigger "^5.3.1" + rc-trigger "^5.3.1" + rc-util "^5.17.0" + +rc-dropdown@~4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/rc-dropdown/-/rc-dropdown-4.1.0.tgz#418a68939631520de80d0865d02b440eeeb4168e" + integrity sha512-VZjMunpBdlVzYpEdJSaV7WM7O0jf8uyDjirxXLZRNZ+tAC+NzD3PXPEtliFwGzVwBBdCmGuSqiS9DWcOLxQ9tw== + dependencies: + "@babel/runtime" "^7.18.3" + "@rc-component/trigger" "^1.7.0" + classnames "^2.2.6" rc-util "^5.17.0" rc-field-form@~1.29.0: @@ -9801,6 +11304,15 @@ rc-field-form@~1.29.0: async-validator "^4.1.0" rc-util "^5.8.0" +rc-field-form@~1.40.0: + version "1.40.0" + resolved "https://registry.yarnpkg.com/rc-field-form/-/rc-field-form-1.40.0.tgz#808dce06ebae1c3aea574e672b673533fc98f11f" + integrity sha512-OM3N01X2BYFGJDJcwpk9/BBtlwgveE7eh2SQAKIxVCt9KVWlODYJ9ypTHQdxchfDbeJKJKxMBFXlLAmyvlgPHg== + dependencies: + "@babel/runtime" "^7.18.0" + async-validator "^4.1.0" + rc-util "^5.32.2" + rc-image@~5.16.0: version "5.16.0" resolved "https://registry.yarnpkg.com/rc-image/-/rc-image-5.16.0.tgz#79d5864bc1c5d66c4620176cc131d34cd4f4bea8" @@ -9813,6 +11325,18 @@ rc-image@~5.16.0: rc-motion "^2.6.2" rc-util "^5.0.6" +rc-image@~7.5.1: + version "7.5.1" + resolved "https://registry.yarnpkg.com/rc-image/-/rc-image-7.5.1.tgz#39a93354e14fe3e5eaafd9c9464e8fe3c6c171a0" + integrity 
sha512-Z9loECh92SQp0nSipc0MBuf5+yVC05H/pzC+Nf8xw1BKDFUJzUeehYBjaWlxly8VGBZJcTHYri61Fz9ng1G3Ag== + dependencies: + "@babel/runtime" "^7.11.2" + "@rc-component/portal" "^1.0.2" + classnames "^2.2.6" + rc-dialog "~9.3.4" + rc-motion "^2.6.2" + rc-util "^5.34.1" + rc-input-number@~7.4.0: version "7.4.2" resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-7.4.2.tgz#7c52d26b986461aa16e486d469dc0476d97c6ea3" @@ -9823,6 +11347,17 @@ rc-input-number@~7.4.0: classnames "^2.2.5" rc-util "^5.28.0" +rc-input-number@~8.4.0: + version "8.4.0" + resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-8.4.0.tgz#f0d0caa2ce3a4e37f062556f9cb4c08c8c23322d" + integrity sha512-B6rziPOLRmeP7kcS5qbdC5hXvvDHYKV4vUxmahevYx2E6crS2bRi0xLDjhJ0E1HtOWo8rTmaE2EBJAkTCZOLdA== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/mini-decimal" "^1.0.1" + classnames "^2.2.5" + rc-input "~1.3.5" + rc-util "^5.28.0" + rc-input@~1.0.0, rc-input@~1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/rc-input/-/rc-input-1.0.4.tgz#2f2c73c884f41e80685bb2eb7b9d5533e8540a77" @@ -9832,6 +11367,15 @@ rc-input@~1.0.0, rc-input@~1.0.4: classnames "^2.2.1" rc-util "^5.18.1" +rc-input@~1.3.5, rc-input@~1.3.6: + version "1.3.6" + resolved "https://registry.yarnpkg.com/rc-input/-/rc-input-1.3.6.tgz#038b74779b6c8b688ff60a41c3976d1db7a1d7d6" + integrity sha512-/HjTaKi8/Ts4zNbYaB5oWCquxFyFQO4Co1MnMgoCeGJlpe7k8Eir2HN0a0F9IHDmmo+GYiGgPpz7w/d/krzsJA== + dependencies: + "@babel/runtime" "^7.11.1" + classnames "^2.2.1" + rc-util "^5.18.1" + rc-mentions@~2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-2.2.0.tgz#27900ec04d067c58205309897efd190f5d8f4ac8" @@ -9845,6 +11389,31 @@ rc-mentions@~2.2.0: rc-textarea "~1.2.0" rc-util "^5.22.5" +rc-mentions@~2.9.1: + version "2.9.1" + resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-2.9.1.tgz#cfe55913fd5bc156ef9814f38c1a2ceefee032ce" + integrity 
sha512-cZuElWr/5Ws0PXx1uxobxfYh4mqUw2FitfabR62YnWgm+WAfDyXZXqZg5DxXW+M1cgVvntrQgDDd9LrihrXzew== + dependencies: + "@babel/runtime" "^7.22.5" + "@rc-component/trigger" "^1.5.0" + classnames "^2.2.6" + rc-input "~1.3.5" + rc-menu "~9.12.0" + rc-textarea "~1.5.0" + rc-util "^5.34.1" + +rc-menu@~9.12.0, rc-menu@~9.12.2: + version "9.12.2" + resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-9.12.2.tgz#1bab34646421224eff5c5b7de993f8ea1238418e" + integrity sha512-NzloFH2pRUYmQ3S/YbJAvRkgCZaLvq0sRa5rgJtuIHLfPPprNHNyepeSlT64+dbVqI4qRWL44VN0lUCldCbbfg== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/trigger" "^1.17.0" + classnames "2.x" + rc-motion "^2.4.3" + rc-overflow "^1.3.1" + rc-util "^5.27.0" + rc-menu@~9.8.0, rc-menu@~9.8.3: version "9.8.4" resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-9.8.4.tgz#58bf19d471e3c74ff4bcfdb0f02a3826ebe2553b" @@ -9866,6 +11435,15 @@ rc-motion@^2.0.0, rc-motion@^2.0.1, rc-motion@^2.3.0, rc-motion@^2.3.4, rc-motio classnames "^2.2.1" rc-util "^5.21.0" +rc-motion@^2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/rc-motion/-/rc-motion-2.9.0.tgz#9e18a1b8d61e528a97369cf9a7601e9b29205710" + integrity sha512-XIU2+xLkdIr1/h6ohPZXyPBMvOmuyFZQ/T0xnawz+Rh+gh4FINcnZmMT5UTIj6hgI0VLDjTaPeRd+smJeSPqiQ== + dependencies: + "@babel/runtime" "^7.11.1" + classnames "^2.2.1" + rc-util "^5.21.0" + rc-notification@~5.0.0: version "5.0.3" resolved "https://registry.yarnpkg.com/rc-notification/-/rc-notification-5.0.3.tgz#2566d4a6b2334c171bad0cb9a8b80cb1a24b29e6" @@ -9876,6 +11454,16 @@ rc-notification@~5.0.0: rc-motion "^2.6.0" rc-util "^5.20.1" +rc-notification@~5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/rc-notification/-/rc-notification-5.3.0.tgz#e31c86fe2350598ade8cff383babd1befa7a94fe" + integrity sha512-WCf0uCOkZ3HGfF0p1H4Sgt7aWfipxORWTPp7o6prA3vxwtWhtug3GfpYls1pnBp4WA+j8vGIi5c2/hQRpGzPcQ== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.9.0" + rc-util 
"^5.20.1" + rc-overflow@^1.0.0, rc-overflow@^1.2.8: version "1.3.0" resolved "https://registry.yarnpkg.com/rc-overflow/-/rc-overflow-1.3.0.tgz#964f7db14aab611c3047788d3b8ee472732fee09" @@ -9886,6 +11474,16 @@ rc-overflow@^1.0.0, rc-overflow@^1.2.8: rc-resize-observer "^1.0.0" rc-util "^5.19.2" +rc-overflow@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/rc-overflow/-/rc-overflow-1.3.2.tgz#72ee49e85a1308d8d4e3bd53285dc1f3e0bcce2c" + integrity sha512-nsUm78jkYAoPygDAcGZeC2VwIg/IBGSodtOY3pMof4W3M9qRJgqaDYm03ZayHlde3I6ipliAxbN0RUcGf5KOzw== + dependencies: + "@babel/runtime" "^7.11.1" + classnames "^2.2.1" + rc-resize-observer "^1.0.0" + rc-util "^5.37.0" + rc-pagination@~3.3.1: version "3.3.1" resolved "https://registry.yarnpkg.com/rc-pagination/-/rc-pagination-3.3.1.tgz#38e364674adf2a753a4fa26e0d9d88ebe523ed0f" @@ -9894,6 +11492,25 @@ rc-pagination@~3.3.1: "@babel/runtime" "^7.10.1" classnames "^2.2.1" +rc-pagination@~3.7.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/rc-pagination/-/rc-pagination-3.7.0.tgz#4c4332800688ec0fd3b2435c4772f7f8d4d7b50e" + integrity sha512-IxSzKapd13L91/195o1TPkKnCNw8gIR25UP1GCW/7c7n/slhld4npu2j2PB9IWjXm4SssaAaSAt2lscYog7wzg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + rc-util "^5.32.2" + +rc-picker@~3.14.6: + version "3.14.6" + resolved "https://registry.yarnpkg.com/rc-picker/-/rc-picker-3.14.6.tgz#60fc34f9883272e10f6c593fa6d82e7e7a70781b" + integrity sha512-AdKKW0AqMwZsKvIpwUWDUnpuGKZVrbxVTZTNjcO+pViGkjC1EBcjMgxVe8tomOEaIHJL5Gd13vS8Rr3zzxWmag== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/trigger" "^1.5.0" + classnames "^2.2.1" + rc-util "^5.30.0" + rc-picker@~3.6.1: version "3.6.2" resolved "https://registry.yarnpkg.com/rc-picker/-/rc-picker-3.6.2.tgz#68d13af7d240e792769a306ed6447e66e47040aa" @@ -9913,6 +11530,15 @@ rc-progress@~3.4.1: classnames "^2.2.6" rc-util "^5.16.1" +rc-progress@~3.5.1: + version "3.5.1" + resolved 
"https://registry.yarnpkg.com/rc-progress/-/rc-progress-3.5.1.tgz#a3cdfd2fe04eb5c3d43fa1c69e7dd70c73b102ae" + integrity sha512-V6Amx6SbLRwPin/oD+k1vbPrO8+9Qf8zW1T8A7o83HdNafEVvAxPV5YsgtKFP+Ud5HghLj33zKOcEHrcrUGkfw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.6" + rc-util "^5.16.1" + rc-rate@~2.10.0: version "2.10.0" resolved "https://registry.yarnpkg.com/rc-rate/-/rc-rate-2.10.0.tgz#b16fd906c13bfc26b4776e27a14d13d06d50c635" @@ -9922,6 +11548,15 @@ rc-rate@~2.10.0: classnames "^2.2.5" rc-util "^5.0.1" +rc-rate@~2.12.0: + version "2.12.0" + resolved "https://registry.yarnpkg.com/rc-rate/-/rc-rate-2.12.0.tgz#0182deffed3b009cdcc61660da8746c39ed91ed5" + integrity sha512-g092v5iZCdVzbjdn28FzvWebK2IutoVoiTeqoLTj9WM7SjA/gOJIw5/JFZMRyJYYVe1jLAU2UhAfstIpCNRozg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-util "^5.0.1" + rc-resize-observer@^1.0.0, rc-resize-observer@^1.1.0, rc-resize-observer@^1.2.0, rc-resize-observer@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/rc-resize-observer/-/rc-resize-observer-1.3.1.tgz#b61b9f27048001243617b81f95e53d7d7d7a6a3d" @@ -9932,6 +11567,16 @@ rc-resize-observer@^1.0.0, rc-resize-observer@^1.1.0, rc-resize-observer@^1.2.0, rc-util "^5.27.0" resize-observer-polyfill "^1.5.1" +rc-resize-observer@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/rc-resize-observer/-/rc-resize-observer-1.4.0.tgz#7bba61e6b3c604834980647cce6451914750d0cc" + integrity sha512-PnMVyRid9JLxFavTjeDXEXo65HCRqbmLBw9xX9gfC4BZiSzbLXKzW3jPz+J0P71pLbD5tBMTT+mkstV5gD0c9Q== + dependencies: + "@babel/runtime" "^7.20.7" + classnames "^2.2.1" + rc-util "^5.38.0" + resize-observer-polyfill "^1.5.1" + rc-segmented@~2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/rc-segmented/-/rc-segmented-2.1.2.tgz#14c9077a1dae9c2ccb2ef5fbc5662c1c48c7ce8e" @@ -9942,6 +11587,29 @@ rc-segmented@~2.1.2: rc-motion "^2.4.4" rc-util "^5.17.0" +rc-segmented@~2.2.2: + version "2.2.2" + resolved 
"https://registry.yarnpkg.com/rc-segmented/-/rc-segmented-2.2.2.tgz#a34f12ce6c0975fc3042ae7656bcd18e1744798e" + integrity sha512-Mq52M96QdHMsNdE/042ibT5vkcGcD5jxKp7HgPC2SRofpia99P5fkfHy1pEaajLMF/kj0+2Lkq1UZRvqzo9mSA== + dependencies: + "@babel/runtime" "^7.11.1" + classnames "^2.2.1" + rc-motion "^2.4.4" + rc-util "^5.17.0" + +rc-select@~14.10.0: + version "14.10.0" + resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-14.10.0.tgz#5f60e61ed7c9a83c8591616b1174a1c4ab2de0cd" + integrity sha512-TsIJTYafTTapCA32LLNpx/AD6ntepR1TG8jEVx35NiAAWCPymhUfuca8kRcUNd3WIGVMDcMKn9kkphoxEz+6Ag== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/trigger" "^1.5.0" + classnames "2.x" + rc-motion "^2.0.1" + rc-overflow "^1.3.1" + rc-util "^5.16.1" + rc-virtual-list "^3.5.2" + rc-select@~14.4.0, rc-select@~14.4.3: version "14.4.3" resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-14.4.3.tgz#68d7f1b6bcb41543f69901951facd5e097fb835d" @@ -9964,6 +11632,15 @@ rc-slider@~10.1.0: classnames "^2.2.5" rc-util "^5.27.0" +rc-slider@~10.4.0: + version "10.4.1" + resolved "https://registry.yarnpkg.com/rc-slider/-/rc-slider-10.4.1.tgz#357d5b8aa85cb27379d8c9e8eb12f03a4cefb7d7" + integrity sha512-wiHRWgzEEHcgF7MWDd0ODsMpqBwszT558R2qH52fplJwctw/L9J8ipEt89ZqVASlh0QFG9kJPgBuL2+cbdLRUw== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.5" + rc-util "^5.27.0" + rc-steps@~6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/rc-steps/-/rc-steps-6.0.0.tgz#f7148f8097d5d135f19b96c1b4f4b50ad6093753" @@ -9973,6 +11650,15 @@ rc-steps@~6.0.0: classnames "^2.2.3" rc-util "^5.16.1" +rc-steps@~6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/rc-steps/-/rc-steps-6.0.1.tgz#c2136cd0087733f6d509209a84a5c80dc29a274d" + integrity sha512-lKHL+Sny0SeHkQKKDJlAjV5oZ8DwCdS2hFhAkIjuQt1/pB81M0cA0ErVFdHq9+jmPmFw1vJB2F5NBzFXLJxV+g== + dependencies: + "@babel/runtime" "^7.16.7" + classnames "^2.2.3" + rc-util "^5.16.1" + rc-switch@~4.0.0: version "4.0.0" resolved 
"https://registry.yarnpkg.com/rc-switch/-/rc-switch-4.0.0.tgz#55fbf99fc2d680791175037d379e170ba51fbe78" @@ -9982,6 +11668,15 @@ rc-switch@~4.0.0: classnames "^2.2.1" rc-util "^5.0.1" +rc-switch@~4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/rc-switch/-/rc-switch-4.1.0.tgz#f37d81b4e0c5afd1274fd85367b17306bf25e7d7" + integrity sha512-TI8ufP2Az9oEbvyCeVE4+90PDSljGyuwix3fV58p7HV2o4wBnVToEyomJRVyTaZeqNPAp+vqeo4Wnj5u0ZZQBg== + dependencies: + "@babel/runtime" "^7.21.0" + classnames "^2.2.1" + rc-util "^5.30.0" + rc-table@~7.31.0: version "7.31.1" resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-7.31.1.tgz#85487b25d98559d6e684b3348e893da1d1f48232" @@ -9993,6 +11688,31 @@ rc-table@~7.31.0: rc-resize-observer "^1.1.0" rc-util "^5.27.1" +rc-table@~7.36.0: + version "7.36.0" + resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-7.36.0.tgz#95e50805392b6a723105c3eb77eefb1e14ba1ced" + integrity sha512-3xVcdCC5OLeOOhaCg+5Lps2oPreM/GWXmUXWTSX4p6vF7F76ABM4dfPpMJ9Dnf5yGRyh+8pe7FRyhRVnWw2H/w== + dependencies: + "@babel/runtime" "^7.10.1" + "@rc-component/context" "^1.4.0" + classnames "^2.2.5" + rc-resize-observer "^1.1.0" + rc-util "^5.37.0" + rc-virtual-list "^3.11.1" + +rc-tabs@~12.13.1: + version "12.13.1" + resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-12.13.1.tgz#e28c5652dfed4e72eb27a75a2691754afd3e5f68" + integrity sha512-83u3l2QkO0UznCzdBLEk9WnNcT+imtmDmMT993sUUEOGnNQAmqOdev0XjeqrcvsAMe9CDpAWDFd7L/RZw+LVJQ== + dependencies: + "@babel/runtime" "^7.11.2" + classnames "2.x" + rc-dropdown "~4.1.0" + rc-menu "~9.12.0" + rc-motion "^2.6.2" + rc-resize-observer "^1.0.0" + rc-util "^5.34.1" + rc-tabs@~12.5.6: version "12.5.10" resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-12.5.10.tgz#0e41c723fac66c4f0bcad3271429fff6653b0721" @@ -10017,6 +11737,17 @@ rc-textarea@~1.2.0, rc-textarea@~1.2.2: rc-resize-observer "^1.0.0" rc-util "^5.27.0" +rc-textarea@~1.5.0, rc-textarea@~1.5.3: + version "1.5.3" + resolved 
"https://registry.yarnpkg.com/rc-textarea/-/rc-textarea-1.5.3.tgz#513e837d308584996c05f540f4f58645a3a8c89a" + integrity sha512-oH682ghHx++stFNYrosPRBfwsypywrTXpaD0/5Z8MPkUOnyOQUaY9ueL9tMu6BP1LfsuYQ1VLpg5OtshViLNgA== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "^2.2.1" + rc-input "~1.3.5" + rc-resize-observer "^1.0.0" + rc-util "^5.27.0" + rc-tooltip@~6.0.0: version "6.0.1" resolved "https://registry.yarnpkg.com/rc-tooltip/-/rc-tooltip-6.0.1.tgz#6a5e33bd6c3f6afe8851ea90e7af43e5c26b3cc6" @@ -10026,6 +11757,26 @@ rc-tooltip@~6.0.0: "@rc-component/trigger" "^1.0.4" classnames "^2.3.1" +rc-tooltip@~6.1.2: + version "6.1.2" + resolved "https://registry.yarnpkg.com/rc-tooltip/-/rc-tooltip-6.1.2.tgz#33923ecfb2cf24347975093cbd0b048ab33c9567" + integrity sha512-89zwvybvCxGJu3+gGF8w5AXd4HHk6hIN7K0vZbkzjilVaEAIWPqc1fcyeUeP71n3VCcw7pTL9LyFupFbrx8gHw== + dependencies: + "@babel/runtime" "^7.11.2" + "@rc-component/trigger" "^1.18.0" + classnames "^2.3.1" + +rc-tree-select@~5.15.0: + version "5.15.0" + resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-5.15.0.tgz#8591f1dd28b043dde6fa1ca30c7acb198b160a42" + integrity sha512-YJHfdO6azFnR0/JuNBZLDptGE4/RGfVeHAafUIYcm2T3RBkL1O8aVqiHvwIyLzdK59ry0NLrByd+3TkfpRM+9Q== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-select "~14.10.0" + rc-tree "~5.8.1" + rc-util "^5.16.1" + rc-tree-select@~5.8.0: version "5.8.0" resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-5.8.0.tgz#b3d861b7b2111d3a96b56040b851d5e280d71c95" @@ -10048,6 +11799,17 @@ rc-tree@~5.7.0: rc-util "^5.16.1" rc-virtual-list "^3.4.8" +rc-tree@~5.8.1, rc-tree@~5.8.2: + version "5.8.2" + resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-5.8.2.tgz#ed3a3f7c56597bbeab3303407a9e1739bbf15621" + integrity sha512-xH/fcgLHWTLmrSuNphU8XAqV7CdaOQgm4KywlLGNoTMhDAcNR3GVNP6cZzb0GrKmIZ9yae+QLot/cAgUdPRMzg== + dependencies: + "@babel/runtime" "^7.10.1" + classnames "2.x" + rc-motion "^2.0.1" + rc-util "^5.16.1" + 
rc-virtual-list "^3.5.1" + rc-trigger@^5.1.2, rc-trigger@^5.3.1, rc-trigger@^5.3.4: version "5.3.4" resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-5.3.4.tgz#6b4b26e32825677c837d1eb4d7085035eecf9a61" @@ -10068,6 +11830,15 @@ rc-upload@~4.3.0: classnames "^2.2.5" rc-util "^5.2.0" +rc-upload@~4.3.5: + version "4.3.5" + resolved "https://registry.yarnpkg.com/rc-upload/-/rc-upload-4.3.5.tgz#12fc69b2af74d08646a104828831bcaf44076eda" + integrity sha512-EHlKJbhkgFSQHliTj9v/2K5aEuFwfUQgZARzD7AmAPOneZEPiCNF3n6PEWIuqz9h7oq6FuXgdR67sC5BWFxJbA== + dependencies: + "@babel/runtime" "^7.18.3" + classnames "^2.2.5" + rc-util "^5.2.0" + rc-util@^5.0.1, rc-util@^5.0.6, rc-util@^5.15.0, rc-util@^5.16.0, rc-util@^5.16.1, rc-util@^5.17.0, rc-util@^5.18.1, rc-util@^5.19.2, rc-util@^5.2.0, rc-util@^5.20.1, rc-util@^5.21.0, rc-util@^5.21.2, rc-util@^5.22.5, rc-util@^5.24.4, rc-util@^5.25.2, rc-util@^5.26.0, rc-util@^5.27.0, rc-util@^5.27.1, rc-util@^5.28.0, rc-util@^5.29.2, rc-util@^5.6.1, rc-util@^5.8.0, rc-util@^5.9.4: version "5.29.3" resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-5.29.3.tgz#dc02b7b2103468e9fdf14e0daa58584f47898e37" @@ -10076,6 +11847,24 @@ rc-util@^5.0.1, rc-util@^5.0.6, rc-util@^5.15.0, rc-util@^5.16.0, rc-util@^5.16. 
"@babel/runtime" "^7.18.3" react-is "^16.12.0" +rc-util@^5.30.0, rc-util@^5.31.1, rc-util@^5.32.2, rc-util@^5.34.1, rc-util@^5.35.0, rc-util@^5.36.0, rc-util@^5.37.0, rc-util@^5.38.0, rc-util@^5.38.1: + version "5.38.1" + resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-5.38.1.tgz#4915503b89855f5c5cd9afd4c72a7a17568777bb" + integrity sha512-e4ZMs7q9XqwTuhIK7zBIVFltUtMSjphuPPQXHoHlzRzNdOwUxDejo0Zls5HYaJfRKNURcsS/ceKVULlhjBrxng== + dependencies: + "@babel/runtime" "^7.18.3" + react-is "^18.2.0" + +rc-virtual-list@^3.11.1, rc-virtual-list@^3.5.1, rc-virtual-list@^3.5.2: + version "3.11.3" + resolved "https://registry.yarnpkg.com/rc-virtual-list/-/rc-virtual-list-3.11.3.tgz#77d4e12e20c1ba314b43c0e37e118296674c5401" + integrity sha512-tu5UtrMk/AXonHwHxUogdXAWynaXsrx1i6dsgg+lOo/KJSF8oBAcprh1z5J3xgnPJD5hXxTL58F8s8onokdt0Q== + dependencies: + "@babel/runtime" "^7.20.0" + classnames "^2.2.6" + rc-resize-observer "^1.0.0" + rc-util "^5.36.0" + rc-virtual-list@^3.4.13, rc-virtual-list@^3.4.8: version "3.4.13" resolved "https://registry.yarnpkg.com/rc-virtual-list/-/rc-virtual-list-3.4.13.tgz#20acc934b263abcf7b7c161f50ef82281b2f7e8d" @@ -10098,10 +11887,10 @@ react-app-polyfill@^2.0.0: regenerator-runtime "^0.13.7" whatwg-fetch "^3.4.1" -react-autocomplete@^1.8.1: +react-autocomplete@1.8.1: version "1.8.1" resolved "https://registry.yarnpkg.com/react-autocomplete/-/react-autocomplete-1.8.1.tgz#ebbbc400006aa91ad538b2d14727b9e7e5d06310" - integrity sha1-67vEAABqqRrVOLLRRye55+XQYxA= + integrity sha512-YQGVN5POdcI3G89wUVWnJhk9rLF6JeB6Ik6xnNpfvSMG4tJkksBzqOE4mkFNGqEz+2AaQw13xNmVXresg9E3zg== dependencies: dom-scroll-into-view "1.0.1" prop-types "^15.5.10" @@ -10195,7 +11984,7 @@ react-hot-loader@^3.1.3: redbox-react "^1.3.6" source-map "^0.6.1" -react-is@^16.12.0, react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0, react-is@^16.8.1: +react-is@^16.10.2, react-is@^16.12.0, react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0, react-is@^16.8.1: version "16.13.1" resolved 
"https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== @@ -10205,6 +11994,11 @@ react-is@^17.0.1: resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== +react-is@^18.0.0, react-is@^18.2.0: + version "18.2.0" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.2.0.tgz#199431eeaaa2e09f86427efbb4f1473edb47609b" + integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w== + react-keyhooks@^0.2.3: version "0.2.3" resolved "https://registry.yarnpkg.com/react-keyhooks/-/react-keyhooks-0.2.3.tgz#57050244d9c501e5b812e2ee3156d8fb967afda9" @@ -10213,7 +12007,7 @@ react-keyhooks@^0.2.3: "@types/react" "^16.9.3" react "^16.9.3" -react-lifecycles-compat@^3.0.0: +react-lifecycles-compat@^3.0.0, react-lifecycles-compat@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA== @@ -10243,6 +12037,13 @@ react-refresh@^0.8.3: resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.8.3.tgz#721d4657672d400c5e3c75d063c4a85fb2d5d68f" integrity sha512-X8jZHc7nCMjaCqoU+V2I0cOhNW+QMBwSUkeXnTi8IPe6zaRWfn60ZzvFDZqWPfmSJfjub7dDW1SP0jaHWLu/hg== +react-resize-detector@^8.0.4: + version "8.1.0" + resolved "https://registry.yarnpkg.com/react-resize-detector/-/react-resize-detector-8.1.0.tgz#1c7817db8bc886e2dbd3fbe3b26ea8e56be0524a" + integrity sha512-S7szxlaIuiy5UqLhLL1KY3aoyGHbZzsTpYal9eYMwCyKqoqoVLCmIgAgNyIM1FhnP2KyBygASJxdhejrzjMb+w== + dependencies: + lodash "^4.17.21" + react-router-dom@5.2.0: 
version "5.2.0" resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.2.0.tgz#9e65a4d0c45e13289e66c7b17c7e175d0ea15662" @@ -10368,25 +12169,30 @@ react-side-effect@^2.1.0: resolved "https://registry.yarnpkg.com/react-side-effect/-/react-side-effect-2.1.1.tgz#66c5701c3e7560ab4822a4ee2742dee215d72eb3" integrity sha512-2FoTQzRNTncBVtnzxFOk2mCpcfxQpenBMbk5kSVBg5UcPqV9fRbgY2zhb7GTWWOlpFmAxhClBDlIq8Rsubz1yQ== -react-toastify@^5.0.1: - version "5.5.0" - resolved "https://registry.yarnpkg.com/react-toastify/-/react-toastify-5.5.0.tgz#f55de44f6b5e3ce3b13b69e5bb4427f2c9404822" - integrity sha512-jsVme7jALIFGRyQsri/g4YTsRuaaGI70T6/ikjwZMB4mwTZaCWqj5NqxhGrRStKlJc5npXKKvKeqTiRGQl78LQ== +react-smooth@^2.0.4: + version "2.0.5" + resolved "https://registry.yarnpkg.com/react-smooth/-/react-smooth-2.0.5.tgz#d153b7dffc7143d0c99e82db1532f8cf93f20ecd" + integrity sha512-BMP2Ad42tD60h0JW6BFaib+RJuV5dsXJK9Baxiv/HlNFjvRLqA9xrNKxVWnUIZPQfzUwGXIlU/dSYLU+54YGQA== dependencies: - "@babel/runtime" "^7.4.2" - classnames "^2.2.6" - prop-types "^15.7.2" - react-transition-group "^4" + fast-equals "^5.0.0" + react-transition-group "2.9.0" -react-transition-group@^4: - version "4.4.2" - resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-4.4.2.tgz#8b59a56f09ced7b55cbd53c36768b922890d5470" - integrity sha512-/RNYfRAMlZwDSr6z4zNKV6xu53/e2BuaBbGhbyYIXTrmgu/bGHzmqOs7mJSJBHy9Ud+ApHx3QjrkKSp1pxvlFg== +react-toastify@9.0.8: + version "9.0.8" + resolved "https://registry.yarnpkg.com/react-toastify/-/react-toastify-9.0.8.tgz#3876c89fc6211a29027b3075010b5ec39ebe4f7e" + integrity sha512-EwM+teWt49HSHx+67qI08yLAW1zAsBxCXLCsUfxHYv1W7/R3ZLhrqKalh7j+kjgPna1h5LQMSMwns4tB4ww2yQ== dependencies: - "@babel/runtime" "^7.5.5" - dom-helpers "^5.0.1" + clsx "^1.1.1" + +react-transition-group@2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-2.9.0.tgz#df9cdb025796211151a436c69a8f3b97b5b07c8d" + 
integrity sha512-+HzNTCHpeQyl4MJ/bdE0u6XRMe9+XG/+aL4mCxVN4DnPBQ0/5bfHWPDuOZUzYdMj94daZaZdCCc1Dzt9R/xSSg== + dependencies: + dom-helpers "^3.4.0" loose-envify "^1.4.0" prop-types "^15.6.2" + react-lifecycles-compat "^3.0.4" react@^16.9.3: version "16.14.0" @@ -10479,6 +12285,28 @@ readdirp@~3.5.0: dependencies: picomatch "^2.2.1" +recharts-scale@^0.4.4: + version "0.4.5" + resolved "https://registry.yarnpkg.com/recharts-scale/-/recharts-scale-0.4.5.tgz#0969271f14e732e642fcc5bd4ab270d6e87dd1d9" + integrity sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w== + dependencies: + decimal.js-light "^2.4.1" + +recharts@^2.9.0: + version "2.9.0" + resolved "https://registry.yarnpkg.com/recharts/-/recharts-2.9.0.tgz#dde7531298cffe8677b1206967830d34f7972ea6" + integrity sha512-cVgiAU3W5UrA8nRRV/N0JrudgZzY/vjkzrlShbH+EFo1vs4nMlXgshZWLI0DfDLmn4/p4pF7Lq7F5PU+K94Ipg== + dependencies: + classnames "^2.2.5" + eventemitter3 "^4.0.1" + lodash "^4.17.19" + react-is "^16.10.2" + react-resize-detector "^8.0.4" + react-smooth "^2.0.4" + recharts-scale "^0.4.4" + tiny-invariant "^1.3.1" + victory-vendor "^36.6.8" + rechoir@^0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.7.0.tgz#32650fd52c21ab252aa5d65b19310441c7e03aca" @@ -10560,6 +12388,11 @@ regenerator-runtime@^0.13.4, regenerator-runtime@^0.13.7: resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz#cac2dacc8a1ea675feaabaeb8ae833898ae46f55" integrity sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew== +regenerator-runtime@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" + integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== + regenerator-transform@^0.14.2: version "0.14.5" resolved 
"https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.14.5.tgz#c98da154683671c9c4dcb16ece736517e1b7feb4" @@ -10770,6 +12603,11 @@ resolve-url@^0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= +resolve.exports@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.2.tgz#f8c934b8e6a13f539e38b7098e2e36134f01e800" + integrity sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg== + resolve@1.18.1: version "1.18.1" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.18.1.tgz#018fcb2c5b207d2a6424aee361c5a266da8f4130" @@ -10786,6 +12624,15 @@ resolve@^1.10.0, resolve@^1.12.0, resolve@^1.13.1, resolve@^1.14.2, resolve@^1.1 is-core-module "^2.2.0" path-parse "^1.0.6" +resolve@^1.20.0: + version "1.22.8" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" + integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== + dependencies: + is-core-module "^2.13.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + resolve@^2.0.0-next.3: version "2.0.0-next.3" resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.3.tgz#d41016293d4a8586a39ca5d9b5f15cbea1f55e46" @@ -10915,6 +12762,13 @@ rxjs@^6.6.6: dependencies: tslib "^1.9.0" +rxjs@^7.8.1: + version "7.8.1" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.1.tgz#6f6f3d99ea8044291efd92e7c7fcf562c4057543" + integrity sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg== + dependencies: + tslib "^2.1.0" + safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" @@ -11029,6 +12883,13 
@@ scroll-into-view-if-needed@^3.0.3: dependencies: compute-scroll-into-view "^3.0.2" +scroll-into-view-if-needed@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz#fa9524518c799b45a2ef6bbffb92bcad0296d01f" + integrity sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ== + dependencies: + compute-scroll-into-view "^3.0.2" + select-hose@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" @@ -11061,6 +12922,11 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== +semver@^6.3.1: + version "6.3.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== + semver@^7.2.1, semver@^7.3.2, semver@^7.3.4: version "7.3.5" resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.5.tgz#0b621c879348d8998e4b0e4be94b3f12e6018ef7" @@ -11068,6 +12934,13 @@ semver@^7.2.1, semver@^7.3.2, semver@^7.3.4: dependencies: lru-cache "^6.0.0" +semver@^7.5.3, semver@^7.5.4: + version "7.5.4" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" + integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== + dependencies: + lru-cache "^6.0.0" + send@0.17.1: version "0.17.1" resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8" @@ -11217,6 +13090,11 @@ signal-exit@^3.0.0, signal-exit@^3.0.2, signal-exit@^3.0.3: resolved 
"https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== +signal-exit@^3.0.7: + version "3.0.7" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + simple-swizzle@^0.2.2: version "0.2.2" resolved "https://registry.yarnpkg.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" @@ -11334,6 +13212,14 @@ source-map-resolve@^0.6.0: atob "^2.1.2" decode-uri-component "^0.2.0" +source-map-support@0.5.13: + version "0.5.13" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932" + integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + source-map-support@^0.5.6, source-map-support@~0.5.12, source-map-support@~0.5.19: version "0.5.19" resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" @@ -11481,6 +13367,13 @@ stack-utils@^2.0.2: dependencies: escape-string-regexp "^2.0.0" +stack-utils@^2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.6.tgz#aaf0748169c02fc33c8232abccf933f54a1cc34f" + integrity sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ== + dependencies: + escape-string-regexp "^2.0.0" + stackframe@^0.3.1: version "0.3.1" resolved "https://registry.yarnpkg.com/stackframe/-/stackframe-0.3.1.tgz#33aa84f1177a5548c8935533cbfeb3420975f5a4" @@ -11582,6 +13475,15 @@ string-width@^4.1.0, string-width@^4.2.0: is-fullwidth-code-point "^3.0.0" strip-ansi "^6.0.0" 
+string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string.prototype.matchall@^4.0.4: version "4.0.4" resolved "https://registry.yarnpkg.com/string.prototype.matchall/-/string.prototype.matchall-4.0.4.tgz#608f255e93e072107f5de066f81a2dfb78cf6b29" @@ -11655,6 +13557,13 @@ strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: dependencies: ansi-regex "^4.1.0" +strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-bom@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" @@ -11738,6 +13647,13 @@ supports-color@^7.0.0, supports-color@^7.1.0: dependencies: has-flag "^4.0.0" +supports-color@^8.0.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + supports-hyperlinks@^2.0.0: version "2.2.0" resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.2.0.tgz#4f77b42488765891774b70c79babd87f9bd594bb" @@ -11746,6 +13662,11 @@ supports-hyperlinks@^2.0.0: has-flag "^4.0.0" supports-color "^7.0.0" +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + svg-parser@^2.0.2: version "2.0.4" resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5" @@ -11933,17 +13854,27 @@ tiny-invariant@^1.0.2: resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.1.0.tgz#634c5f8efdc27714b7f386c35e6760991d230875" integrity sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw== +tiny-invariant@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.3.1.tgz#8560808c916ef02ecfd55e66090df23a4b7aa642" + integrity sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw== + tiny-warning@^1.0.0, tiny-warning@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/tiny-warning/-/tiny-warning-1.0.3.tgz#94a30db453df4c643d0fd566060d60a875d84754" integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA== -tippy.js@^4.3.4: - version "4.3.5" - resolved "https://registry.yarnpkg.com/tippy.js/-/tippy.js-4.3.5.tgz#882bff8d92f09bb0546d2826d5668c0560006f54" - integrity sha512-NDq3efte8nGK6BOJ1dDN1/WelAwfmh3UtIYXXck6+SxLzbIQNZE/cmRSnwScZ/FyiKdIcvFHvYUgqmoGx8CcyA== +tippy.js@^5.1.1: + version "5.2.1" + resolved "https://registry.yarnpkg.com/tippy.js/-/tippy.js-5.2.1.tgz#e08d7332c103a15e427124d710d881fca82365d6" + integrity sha512-66UT6JRVn3dXNCORE+0UvUK3JZqV/VhLlU6HTDm3FmrweUUFUxUGvT8tUQ7ycMp+uhuLAwQw6dBabyC+iKf/MA== dependencies: - popper.js "^1.14.7" + popper.js "^1.16.0" + +tmpl@1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.5.tgz#8683e0b902bb9c20c4f726e3c0b69f36518c07cc" + integrity 
sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw== tmpl@1.0.x: version "1.0.4" @@ -12041,6 +13972,20 @@ tryer@^1.0.1: resolved "https://registry.yarnpkg.com/tryer/-/tryer-1.0.1.tgz#f2c85406800b9b0f74c9f7465b81eaad241252f8" integrity sha512-c3zayb8/kWWpycWYg87P71E1S1ZL6b6IJxfb5fvsUgsf0S2MVGaDhDXXjDMpdCpfWXqptc+4mXwmiy1ypXqRAA== +ts-jest@^29.1.1: + version "29.1.1" + resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.1.1.tgz#f58fe62c63caf7bfcc5cc6472082f79180f0815b" + integrity sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA== + dependencies: + bs-logger "0.x" + fast-json-stable-stringify "2.x" + jest-util "^29.0.0" + json5 "^2.2.3" + lodash.memoize "4.x" + make-error "1.x" + semver "^7.5.3" + yargs-parser "^21.0.1" + ts-loader@^8.0.17: version "8.2.0" resolved "https://registry.yarnpkg.com/ts-loader/-/ts-loader-8.2.0.tgz#6a3aeaa378aecda543e2ed2c332d3123841d52e0" @@ -12077,6 +14022,11 @@ tslib@^2.0.3: resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.2.0.tgz#fb2c475977e35e241311ede2693cee1ec6698f5c" integrity sha512-gS9GVHRU+RGn5KQM2rllAlR3dU6m7AcpJKdtH8gFvQiC4Otgk98XnmMU+nZenHt/+VhnBPWwgrJsyrdcw6i23w== +tslib@^2.1.0: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== + tsutils@^3.17.1: version "3.21.0" resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.21.0.tgz#b48717d394cea6c1e096983eed58e9d61715b623" @@ -12175,10 +14125,10 @@ typedarray@^0.0.6: resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= -typescript@^4.0.3: - version "4.3.5" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.3.5.tgz#4d1c37cc16e893973c45a06886b7113234f119f4" - integrity 
sha512-DqQgihaQ9cUrskJo9kIyW/+g0Vxsk8cDtZ52a3NGh0YNTfpUSArXSohyUGnvbPazEPLu398C0UxmKSOrPumUzA== +typescript@^4.9.5: + version "4.9.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a" + integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== typescript@^5.0.4: version "5.0.4" @@ -12195,6 +14145,11 @@ unbox-primitive@^1.0.0: has-symbols "^1.0.2" which-boxed-primitive "^1.0.2" +undici-types@~5.26.4: + version "5.26.5" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" + integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== + unicode-canonical-property-names-ecmascript@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" @@ -12292,6 +14247,14 @@ upath@^1.1.1, upath@^1.1.2, upath@^1.2.0: resolved "https://registry.yarnpkg.com/upath/-/upath-1.2.0.tgz#8f66dbcd55a883acdae4408af8b035a5044c1894" integrity sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg== +update-browserslist-db@^1.0.13: + version "1.0.13" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz#3c5e4f5c083661bd38ef64b6328c26ed6c8248c4" + integrity sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg== + dependencies: + escalade "^3.1.1" + picocolors "^1.0.0" + uri-js@^4.2.2: version "4.4.1" resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" @@ -12391,6 +14354,11 @@ uuid@^8.3.0: resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity 
sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +uuid@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== + v8-compile-cache@^2.0.3, v8-compile-cache@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz#2de19618c66dc247dcfb6f99338035d8245a2cee" @@ -12405,6 +14373,15 @@ v8-to-istanbul@^7.0.0: convert-source-map "^1.6.0" source-map "^0.7.3" +v8-to-istanbul@^9.0.1: + version "9.2.0" + resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz#2ed7644a245cddd83d4e087b9b33b3e62dfd10ad" + integrity sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA== + dependencies: + "@jridgewell/trace-mapping" "^0.3.12" + "@types/istanbul-lib-coverage" "^2.0.1" + convert-source-map "^2.0.0" + validate-npm-package-license@^3.0.1: version "3.0.4" resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" @@ -12437,6 +14414,26 @@ verror@1.10.0: core-util-is "1.0.2" extsprintf "^1.2.0" +victory-vendor@^36.6.8: + version "36.6.11" + resolved "https://registry.yarnpkg.com/victory-vendor/-/victory-vendor-36.6.11.tgz#acae770717c2dae541a54929c304ecab5ab6ac2a" + integrity sha512-nT8kCiJp8dQh8g991J/R5w5eE2KnO8EAIP0xocWlh9l2okngMWglOPoMZzJvek8Q1KUc4XE/mJxTZnvOB1sTYg== + dependencies: + "@types/d3-array" "^3.0.3" + "@types/d3-ease" "^3.0.0" + "@types/d3-interpolate" "^3.0.1" + "@types/d3-scale" "^4.0.2" + "@types/d3-shape" "^3.1.0" + "@types/d3-time" "^3.0.0" + "@types/d3-timer" "^3.0.0" + d3-array "^3.1.6" + d3-ease "^3.0.1" + d3-interpolate "^3.0.1" + d3-scale "^4.0.2" + d3-shape "^3.1.0" + d3-time "^3.0.0" + d3-timer "^3.0.1" + vm-browserify@^1.0.1: 
version "1.1.2" resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-1.1.2.tgz#78641c488b8e6ca91a75f511e7a3b32a86e5dda0" @@ -12463,6 +14460,13 @@ walker@^1.0.7, walker@~1.0.5: dependencies: makeerror "1.0.x" +walker@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" + integrity sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ== + dependencies: + makeerror "1.0.12" + warning@^4.0.1: version "4.0.3" resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3" @@ -12974,6 +14978,15 @@ wrap-ansi@^6.2.0: string-width "^4.1.0" strip-ansi "^6.0.0" +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" @@ -12989,6 +15002,14 @@ write-file-atomic@^3.0.0: signal-exit "^3.0.2" typedarray-to-buffer "^3.1.5" +write-file-atomic@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd" + integrity sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg== + dependencies: + imurmurhash "^0.1.4" + signal-exit "^3.0.7" + ws@^6.2.1: version "6.2.1" resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.1.tgz#442fdf0a47ed64f59b6a5d8ff130f4748ed524fb" @@ -13016,16 +15037,26 @@ xtend@^4.0.0, xtend@~4.0.1: resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" integrity 
sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== -xterm@2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/xterm/-/xterm-2.4.0.tgz#d70227993b74323e36495ab9c7bdee0bc8d0dbba" - integrity sha1-1wInmTt0Mj42SVq5x73uC8jQ27o= +xterm-addon-fit@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/xterm-addon-fit/-/xterm-addon-fit-0.5.0.tgz#2d51b983b786a97dcd6cde805e700c7f913bc596" + integrity sha512-DsS9fqhXHacEmsPxBJZvfj2la30Iz9xk+UKjhQgnYNkrUIN5CYLbw7WEfz117c7+S86S/tpHPfvNxJsF5/G8wQ== + +xterm@^4.19.0: + version "4.19.0" + resolved "https://registry.yarnpkg.com/xterm/-/xterm-4.19.0.tgz#c0f9d09cd61de1d658f43ca75f992197add9ef6d" + integrity sha512-c3Cp4eOVsYY5Q839dR5IejghRPpxciGmLWWaP9g+ppfMeBChMeLa1DCA+pmX/jyDZ+zxFOmlJL/82qVdayVoGQ== y18n@^4.0.0: version "4.0.3" resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== +y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + yallist@^3.0.2: version "3.1.1" resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" @@ -13057,6 +15088,11 @@ yargs-parser@^18.1.2: camelcase "^5.0.0" decamelize "^1.2.0" +yargs-parser@^21.0.1, yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + yargs@^13.3.2: version "13.3.2" resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.2.tgz#ad7ffefec1aa59565ac915f82dccb38a9c31a2dd" @@ -13090,6 +15126,19 @@ yargs@^15.4.1: 
y18n "^4.0.0" yargs-parser "^18.1.2" +yargs@^17.3.1: + version "17.7.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" diff --git a/utils/analysis/factory.go b/utils/analysis/factory.go index 278f24a19a..0e17e587a7 100644 --- a/utils/analysis/factory.go +++ b/utils/analysis/factory.go @@ -243,7 +243,7 @@ func ValidateMetric(metric v1alpha1.Metric) error { func extractValueFromRollout(r *v1alpha1.Rollout, path string) (string, error) { j, _ := json.Marshal(r) - m := interface{}(nil) + m := any(nil) json.Unmarshal(j, &m) sections := regexp.MustCompile("[\\.\\[\\]]+").Split(path, -1) for _, section := range sections { @@ -251,7 +251,7 @@ func extractValueFromRollout(r *v1alpha1.Rollout, path string) (string, error) { continue // if path ends with a separator char, Split returns an empty last section } - if asArray, ok := m.([]interface{}); ok { + if asArray, ok := m.([]any); ok { if i, err := strconv.Atoi(section); err != nil { return "", fmt.Errorf("invalid index '%s'", section) } else if i >= len(asArray) { @@ -259,7 +259,7 @@ func extractValueFromRollout(r *v1alpha1.Rollout, path string) (string, error) { } else { m = asArray[i] } - } else if asMap, ok := m.(map[string]interface{}); ok { + } else if asMap, ok := m.(map[string]any); ok { m = asMap[section] } else { return "", fmt.Errorf("invalid path %s in rollout", path) @@ -271,8 +271,8 @@ func extractValueFromRollout(r *v1alpha1.Rollout, path string) (string, error) { } var isArray, isMap bool - _, isArray = 
m.([]interface{}) - _, isMap = m.(map[string]interface{}) + _, isArray = m.([]any) + _, isMap = m.(map[string]any) if isArray || isMap { return "", fmt.Errorf("path %s in rollout must terminate in a primitive value", path) } diff --git a/utils/analysis/helpers.go b/utils/analysis/helpers.go index caedeb472c..42ded67315 100644 --- a/utils/analysis/helpers.go +++ b/utils/analysis/helpers.go @@ -288,7 +288,10 @@ func CreateWithCollisionCounter(logCtx *log.Entry, analysisRunIf argoprojclient. } } -func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, measurementRetentionMetrics []v1alpha1.MeasurementRetention, name, generateName, namespace string) (*v1alpha1.AnalysisRun, error) { +func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate, args []v1alpha1.Argument, dryRunMetrics []v1alpha1.DryRun, + measurementRetentionMetrics []v1alpha1.MeasurementRetention, + labels map[string]string, annotations map[string]string, + name, generateName, namespace string) (*v1alpha1.AnalysisRun, error) { template, err := FlattenTemplates(templates, clusterTemplates) if err != nil { return nil, err @@ -310,6 +313,8 @@ func NewAnalysisRunFromTemplates(templates []*v1alpha1.AnalysisTemplate, cluster Name: name, GenerateName: generateName, Namespace: namespace, + Labels: labels, + Annotations: annotations, }, Spec: v1alpha1.AnalysisRunSpec{ Metrics: template.Spec.Metrics, @@ -541,9 +546,9 @@ func NewAnalysisRunFromUnstructured(obj *unstructured.Unstructured, templateArgs } // Set args - newArgVals := []interface{}{} + newArgVals := []any{} for i := 0; i < len(newArgs); i++ { - var newArgInterface map[string]interface{} + var newArgInterface map[string]any newArgBytes, err := json.Marshal(newArgs[i]) if err != nil { return nil, err @@ -580,3 +585,23 @@ func GetInstanceID(obj runtime.Object) 
string { } return "" } + +func FilterUniqueTemplates(templates []*v1alpha1.AnalysisTemplate, clusterTemplates []*v1alpha1.ClusterAnalysisTemplate) ([]*v1alpha1.AnalysisTemplate, []*v1alpha1.ClusterAnalysisTemplate) { + uniqueTemplates := []*v1alpha1.AnalysisTemplate{} + uniqueClusterTemplates := []*v1alpha1.ClusterAnalysisTemplate{} + seenTemplates := map[string]bool{} + seenClusterTemplates := map[string]bool{} + for _, template := range templates { + if !seenTemplates[template.Name] { + seenTemplates[template.Name] = true + uniqueTemplates = append(uniqueTemplates, template) + } + } + for _, clusterTemplate := range clusterTemplates { + if !seenClusterTemplates[clusterTemplate.Name] { + seenClusterTemplates[clusterTemplate.Name] = true + uniqueClusterTemplates = append(uniqueClusterTemplates, clusterTemplate) + } + } + return uniqueTemplates, uniqueClusterTemplates +} diff --git a/utils/analysis/helpers_test.go b/utils/analysis/helpers_test.go index 3fbefadce4..de1aa97d58 100644 --- a/utils/analysis/helpers_test.go +++ b/utils/analysis/helpers_test.go @@ -646,7 +646,29 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { } args := []v1alpha1.Argument{arg, secretArg} - run, err := NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "foo-run", "foo-run-generate-", "my-ns") + labels := make(map[string]string) + annotations := make(map[string]string) + + run, err := NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, labels, annotations, "foo-run", "foo-run-generate-", "my-ns") + assert.NoError(t, err) + assert.Equal(t, "foo-run", run.Name) + assert.Equal(t, "foo-run-generate-", run.GenerateName) + assert.Equal(t, "my-ns", run.Namespace) + + assert.Len(t, run.Spec.Args, 2) + assert.Contains(t, run.Spec.Args, arg) + assert.Contains(t, run.Spec.Args, secretArg) + + assert.Len(t, run.Labels, 0) + assert.Len(t, run.Labels, 0) + + // 
With additional labels and annotations + labels["foo"] = "bar" + labels["foo2"] = "bar2" + annotations["bar"] = "foo" + annotations["bar2"] = "foo2" + + run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, labels, annotations, "foo-run", "foo-run-generate-", "my-ns") assert.NoError(t, err) assert.Equal(t, "foo-run", run.Name) assert.Equal(t, "foo-run-generate-", run.GenerateName) assert.Equal(t, "my-ns", run.Namespace) @@ -656,10 +678,17 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { assert.Contains(t, run.Spec.Args, arg) assert.Contains(t, run.Spec.Args, secretArg) + assert.Len(t, run.Labels, 2) + assert.Equal(t, run.Labels["foo"], "bar") + assert.Equal(t, run.Labels["foo2"], "bar2") + assert.Len(t, run.Annotations, 2) + assert.Equal(t, run.Annotations["bar"], "foo") + assert.Equal(t, run.Annotations["bar2"], "foo2") + // Fail Merge Args unresolvedArg := v1alpha1.Argument{Name: "unresolved"} templates[0].Spec.Args = append(templates[0].Spec.Args, unresolvedArg) - run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "foo-run", "foo-run-generate-", "my-ns") + run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, labels, annotations, "foo-run", "foo-run-generate-", "my-ns") assert.Nil(t, run) assert.Equal(t, fmt.Errorf("args.unresolved was not resolved"), err) // Fail flatten metric @@ -672,7 +701,7 @@ func TestNewAnalysisRunFromTemplates(t *testing.T) { } // Fail Flatten Templates templates = append(templates, matchingMetric) - run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, "foo-run", "foo-run-generate-", "my-ns") + run, err = NewAnalysisRunFromTemplates(templates, clusterTemplates, args, []v1alpha1.DryRun{}, []v1alpha1.MeasurementRetention{}, labels, annotations, "foo-run", 
"foo-run-generate-", "my-ns") assert.Nil(t, run) assert.Equal(t, fmt.Errorf("two metrics have the same name 'success-rate'"), err) } @@ -822,7 +851,7 @@ func TestNewAnalysisRunFromUnstructured(t *testing.T) { assert.Equal(t, len(args), len(arArgs)) for i, arg := range arArgs { - argnv := arg.(map[string]interface{}) + argnv := arg.(map[string]any) assert.Equal(t, *args[i].Value, argnv["value"]) } } @@ -852,8 +881,10 @@ func TestCompatibilityNewAnalysisRunFromTemplate(t *testing.T) { Value: pointer.StringPtr("my-val"), }, } + labels := make(map[string]string) + annotations := make(map[string]string) analysisTemplates := []*v1alpha1.AnalysisTemplate{&template} - run, err := NewAnalysisRunFromTemplates(analysisTemplates, nil, args, nil, nil, "foo-run", "foo-run-generate-", "my-ns") + run, err := NewAnalysisRunFromTemplates(analysisTemplates, nil, args, nil, nil, labels, annotations, "foo-run", "foo-run-generate-", "my-ns") assert.NoError(t, err) assert.Equal(t, "foo-run", run.Name) assert.Equal(t, "foo-run-generate-", run.GenerateName) @@ -887,8 +918,10 @@ func TestCompatibilityNewAnalysisRunFromClusterTemplate(t *testing.T) { Value: pointer.StringPtr("my-val"), }, } + labels := make(map[string]string) + annotations := make(map[string]string) clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{&clusterTemplate} - run, err := NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, nil, nil, "foo-run", "foo-run-generate-", "my-ns") + run, err := NewAnalysisRunFromTemplates(nil, clusterAnalysisTemplates, args, nil, nil, labels, annotations, "foo-run", "foo-run-generate-", "my-ns") assert.NoError(t, err) assert.Equal(t, "foo-run", run.Name) assert.Equal(t, "foo-run-generate-", run.GenerateName) @@ -1082,3 +1115,83 @@ func TestGetMeasurementRetentionMetrics(t *testing.T) { assert.Equal(t, len(measurementRetentionMetricNamesMap), 0) }) } + +func TestAnalysisTemplateFiltering(t *testing.T) { + t.Run("FilterAnalysisTemplates is returning empty arrays when 
empty arrays are provided in parameters", func(t *testing.T) { + + analysisTemplates := []*v1alpha1.AnalysisTemplate{} + clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{} + + filteredAnalysisTemplates, filteredClusterAnalysisTemplates := FilterUniqueTemplates(analysisTemplates, clusterAnalysisTemplates) + + assert.Equal(t, len(filteredAnalysisTemplates), 0) + assert.Equal(t, len(filteredClusterAnalysisTemplates), 0) + + }) + + t.Run("FilterAnalysisTemplates is not filtering analysisTemplates duplications if there are none in the reference tree", func(t *testing.T) { + + analysisTemplates := []*v1alpha1.AnalysisTemplate{ + analysisTemplate("foo"), + analysisTemplate("bar"), + analysisTemplate("foo-2"), + analysisTemplate("foo-3"), + } + clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{ + clusterAnalysisTemplate("cluster-foo"), + clusterAnalysisTemplate("cluster-bar"), + clusterAnalysisTemplate("cluster-foo-2"), + clusterAnalysisTemplate("cluster-foo-3"), + } + + filteredAnalysisTemplates, filteredClusterAnalysisTemplates := FilterUniqueTemplates(analysisTemplates, clusterAnalysisTemplates) + + assert.Equal(t, len(filteredAnalysisTemplates), 4) + assert.Equal(t, len(filteredClusterAnalysisTemplates), 4) + + }) + t.Run("FilterAnalysisTemplates is filtering analysisTemplates duplications in the reference tree", func(t *testing.T) { + + analysisTemplates := []*v1alpha1.AnalysisTemplate{ + analysisTemplate("foo"), + analysisTemplate("foo"), + analysisTemplate("foo-2"), + analysisTemplate("foo-3"), + analysisTemplate("foo-3"), + } + clusterAnalysisTemplates := []*v1alpha1.ClusterAnalysisTemplate{ + clusterAnalysisTemplate("cluster-foo"), + clusterAnalysisTemplate("cluster-foo"), + clusterAnalysisTemplate("cluster-bar"), + clusterAnalysisTemplate("cluster-bar"), + clusterAnalysisTemplate("cluster-bar"), + clusterAnalysisTemplate("cluster-bar"), + clusterAnalysisTemplate("cluster-foo-2"), + clusterAnalysisTemplate("cluster-foo-2"), + 
clusterAnalysisTemplate("cluster-foo-2"), + clusterAnalysisTemplate("cluster-foo-3"), + } + + filteredAnalysisTemplates, filteredClusterAnalysisTemplates := FilterUniqueTemplates(analysisTemplates, clusterAnalysisTemplates) + + assert.Equal(t, len(filteredAnalysisTemplates), 3) + assert.Equal(t, len(filteredClusterAnalysisTemplates), 4) + + }) +} + +func analysisTemplate(name string) *v1alpha1.AnalysisTemplate { + return &v1alpha1.AnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } +} + +func clusterAnalysisTemplate(name string) *v1alpha1.ClusterAnalysisTemplate { + return &v1alpha1.ClusterAnalysisTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } +} diff --git a/utils/annotations/annotations.go b/utils/annotations/annotations.go index 80cec5ce2a..067492bc2b 100644 --- a/utils/annotations/annotations.go +++ b/utils/annotations/annotations.go @@ -8,6 +8,7 @@ import ( log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" @@ -27,6 +28,8 @@ const ( DesiredReplicasAnnotation = RolloutLabel + "/desired-replicas" // WorkloadGenerationAnnotation is the generation of the referenced workload WorkloadGenerationAnnotation = RolloutLabel + "/workload-generation" + // NotificationEngineAnnotation the annotation notification engine uses to determine if it should notify + NotificationEngineAnnotation = "notified.notifications.argoproj.io" ) // GetDesiredReplicasAnnotation returns the number of desired replicas @@ -52,17 +55,30 @@ func GetWorkloadGenerationAnnotation(ro *v1alpha1.Rollout) (int32, bool) { } // GetRevisionAnnotation returns revision of rollout -func GetRevisionAnnotation(ro *v1alpha1.Rollout) (int32, bool) { - if ro == nil { +func GetRevisionAnnotation(anyObj metav1.Object) (int32, bool) { + + if anyObj == nil { + return 0, false + } + 
var obj interface{} + switch anyObj.(type) { + case *v1alpha1.Rollout: + obj = anyObj.(*v1alpha1.Rollout) + case *v1alpha1.AnalysisRun: + obj = anyObj.(*v1alpha1.AnalysisRun) + default: + log.Warnf("object not supported type: %T", anyObj) return 0, false } - annotationValue, ok := ro.Annotations[RevisionAnnotation] + + annotationValue, ok := obj.(metav1.Object).GetAnnotations()[RevisionAnnotation] if !ok { - return int32(0), false + return 0, false } + intValue, err := strconv.ParseInt(annotationValue, 10, 32) if err != nil { - log.Warnf("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, RevisionAnnotation, ro.Name) + log.Warnf("Cannot convert the value %q with annotation key %q for the object %q", annotationValue, RevisionAnnotation, anyObj.GetName()) return int32(0), false } return int32(intValue), true @@ -205,6 +221,7 @@ var annotationsToSkip = map[string]bool{ RevisionAnnotation: true, RevisionHistoryAnnotation: true, DesiredReplicasAnnotation: true, + NotificationEngineAnnotation: true, } // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key diff --git a/utils/annotations/annotations_test.go b/utils/annotations/annotations_test.go index 3ad136fd7e..239ab83fd3 100644 --- a/utils/annotations/annotations_test.go +++ b/utils/annotations/annotations_test.go @@ -483,4 +483,16 @@ func TestGetRevisionAnnotation(t *testing.T) { }) assert.False(t, found) assert.Equal(t, int32(0), rev) + + revAR, found := GetRevisionAnnotation(&v1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + RevisionAnnotation: "1", + }, + }, + }) + assert.True(t, found) + assert.Equal(t, int32(1), revAR) } diff --git a/utils/aws/aws.go b/utils/aws/aws.go index b8884a0589..42b4907836 100644 --- a/utils/aws/aws.go +++ b/utils/aws/aws.go @@ -300,7 +300,7 @@ func GetTargetGroupBindingsByService(ctx context.Context, 
dynamicClient dynamic. return tgbs, nil } -func toTargetGroupBinding(obj map[string]interface{}) (*TargetGroupBinding, error) { +func toTargetGroupBinding(obj map[string]any) (*TargetGroupBinding, error) { data, err := json.Marshal(obj) if err != nil { return nil, err @@ -368,7 +368,7 @@ func VerifyTargetGroupBinding(ctx context.Context, logCtx *log.Entry, awsClnt Cl logCtx.Warn("Unable to match TargetGroupBinding spec.serviceRef.port to Service spec.ports") return nil, nil } - logCtx = logCtx.WithFields(map[string]interface{}{ + logCtx = logCtx.WithFields(map[string]any{ "service": svc.Name, "targetgroupbinding": tgb.Name, "tg": tgb.Spec.TargetGroupARN, diff --git a/utils/controller/controller.go b/utils/controller/controller.go index 63aa6f11cd..b5c1fc875b 100644 --- a/utils/controller/controller.go +++ b/utils/controller/controller.go @@ -6,6 +6,8 @@ import ( "runtime/debug" "time" + "k8s.io/apimachinery/pkg/api/errors" + log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -115,7 +117,7 @@ func processNextWorkItem(ctx context.Context, workqueue workqueue.RateLimitingIn } // We wrap this block in a func so we can defer c.workqueue.Done. - err := func(obj interface{}) error { + err := func(obj any) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. For example, we do @@ -157,6 +159,11 @@ func processNextWorkItem(ctx context.Context, workqueue workqueue.RateLimitingIn if err := runSyncHandler(); err != nil { logCtx.Errorf("%s syncHandler error: %v", objType, err) metricsServer.IncError(namespace, name, objType) + + if errors.IsNotFound(err) { + workqueue.Forget(obj) + return nil + } // Put the item back on // the workqueue to handle any transient errors. 
workqueue.AddRateLimited(key) @@ -179,14 +186,14 @@ func processNextWorkItem(ctx context.Context, workqueue workqueue.RateLimitingIn } // metaNamespaceKeyFunc is a wrapper around cache.MetaNamespaceKeyFunc but also accepts strings -func metaNamespaceKeyFunc(obj interface{}) (string, error) { +func metaNamespaceKeyFunc(obj any) (string, error) { if objStr, ok := obj.(string); ok { obj = cache.ExplicitKey(objStr) } return cache.MetaNamespaceKeyFunc(obj) } -func Enqueue(obj interface{}, q workqueue.RateLimitingInterface) { +func Enqueue(obj any, q workqueue.RateLimitingInterface) { var key string var err error if key, err = metaNamespaceKeyFunc(obj); err != nil { @@ -196,7 +203,7 @@ func Enqueue(obj interface{}, q workqueue.RateLimitingInterface) { q.Add(key) } -func EnqueueAfter(obj interface{}, duration time.Duration, q workqueue.RateLimitingInterface) { +func EnqueueAfter(obj any, duration time.Duration, q workqueue.RateLimitingInterface) { var key string var err error if key, err = metaNamespaceKeyFunc(obj); err != nil { @@ -206,7 +213,7 @@ func EnqueueAfter(obj interface{}, duration time.Duration, q workqueue.RateLimit q.AddAfter(key, duration) } -func EnqueueRateLimited(obj interface{}, q workqueue.RateLimitingInterface) { +func EnqueueRateLimited(obj any, q workqueue.RateLimitingInterface) { var key string var err error if key, err = metaNamespaceKeyFunc(obj); err != nil { @@ -222,7 +229,7 @@ func EnqueueRateLimited(obj interface{}, q workqueue.RateLimitingInterface) { // It then enqueues that ownerType resource to be processed. If the object does not // have an appropriate OwnerReference, it will simply be skipped. 
// This function assumes parent object is in the same namespace as the child -func EnqueueParentObject(obj interface{}, ownerType string, enqueue func(obj interface{})) { +func EnqueueParentObject(obj any, ownerType string, enqueue func(obj any), parentGetter ...func(any) (*metav1.OwnerReference, string)) { var object metav1.Object var ok bool if object, ok = obj.(metav1.Object); !ok { @@ -239,12 +246,26 @@ func EnqueueParentObject(obj interface{}, ownerType string, enqueue func(obj int log.Infof("Recovered deleted object '%s' from tombstone", object.GetName()) } - if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + var ( + ownerRef *metav1.OwnerReference + namespace string + ) + + if len(parentGetter) > 0 { + ownerRef, namespace = parentGetter[0](obj) + } else { + ownerRef = metav1.GetControllerOf(object) + } + + if ownerRef != nil { // If this object is not owned by the ownerType, we should not do anything more with it. if ownerRef.Kind != ownerType { return } - namespace := object.GetNamespace() + // if namespace not set by parentGetter use object namespace + if namespace == "" { + namespace = object.GetNamespace() + } parent := cache.ExplicitKey(namespace + "/" + ownerRef.Name) log.Infof("Enqueueing parent of %s/%s: %s %s", namespace, object.GetName(), ownerRef.Kind, parent) enqueue(parent) diff --git a/utils/controller/controller_test.go b/utils/controller/controller_test.go index 9fa54b5517..3761d05ba5 100644 --- a/utils/controller/controller_test.go +++ b/utils/controller/controller_test.go @@ -169,7 +169,7 @@ func TestEnqueueParentObjectInvalidObject(t *testing.T) { errorMessages = append(errorMessages, err) }) invalidObject := "invalid-object" - enqueueFunc := func(obj interface{}) {} + enqueueFunc := func(obj any) {} EnqueueParentObject(invalidObject, register.RolloutKind, enqueueFunc) assert.Len(t, errorMessages, 1) assert.Error(t, errorMessages[0], "error decoding object, invalid type") @@ -182,7 +182,7 @@ func 
TestEnqueueParentObjectInvalidTombstoneObject(t *testing.T) { }) invalidObject := cache.DeletedFinalStateUnknown{} - enqueueFunc := func(obj interface{}) {} + enqueueFunc := func(obj any) {} EnqueueParentObject(invalidObject, register.RolloutKind, enqueueFunc) assert.Len(t, errorMessages, 1) assert.Equal(t, "error decoding object tombstone, invalid type", errorMessages[0]) @@ -199,8 +199,8 @@ func TestEnqueueParentObjectNoOwner(t *testing.T) { Namespace: "default", }, } - enqueuedObjs := make([]interface{}, 0) - enqueueFunc := func(obj interface{}) { + enqueuedObjs := make([]any, 0) + enqueueFunc := func(obj any) { enqueuedObjs = append(enqueuedObjs, obj) } EnqueueParentObject(rs, register.RolloutKind, enqueueFunc) @@ -228,8 +228,8 @@ func TestEnqueueParentObjectDifferentOwnerKind(t *testing.T) { OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(experiment, experimentKind)}, }, } - enqueuedObjs := make([]interface{}, 0) - enqueueFunc := func(obj interface{}) { + enqueuedObjs := make([]any, 0) + enqueueFunc := func(obj any) { enqueuedObjs = append(enqueuedObjs, obj) } EnqueueParentObject(rs, register.RolloutKind, enqueueFunc) @@ -257,8 +257,8 @@ func TestEnqueueParentObjectOtherOwnerTypes(t *testing.T) { OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(deployment, deploymentKind)}, }, } - enqueuedObjs := make([]interface{}, 0) - enqueueFunc := func(obj interface{}) { + enqueuedObjs := make([]any, 0) + enqueueFunc := func(obj any) { enqueuedObjs = append(enqueuedObjs, obj) } EnqueueParentObject(rs, "Deployment", enqueueFunc) @@ -286,8 +286,8 @@ func TestEnqueueParentObjectEnqueueExperiment(t *testing.T) { OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(experiment, experimentKind)}, }, } - enqueuedObjs := make([]interface{}, 0) - enqueueFunc := func(obj interface{}) { + enqueuedObjs := make([]any, 0) + enqueueFunc := func(obj any) { enqueuedObjs = append(enqueuedObjs, obj) } client := 
fake.NewSimpleClientset(experiment) @@ -319,8 +319,8 @@ func TestEnqueueParentObjectEnqueueRollout(t *testing.T) { OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rollout, rolloutKind)}, }, } - enqueuedObjs := make([]interface{}, 0) - enqueueFunc := func(obj interface{}) { + enqueuedObjs := make([]any, 0) + enqueueFunc := func(obj any) { enqueuedObjs = append(enqueuedObjs, obj) } client := fake.NewSimpleClientset(rollout) @@ -356,8 +356,8 @@ func TestEnqueueParentObjectRecoverTombstoneObject(t *testing.T) { Obj: rs, } - enqueuedObjs := make([]interface{}, 0) - enqueueFunc := func(obj interface{}) { + enqueuedObjs := make([]any, 0) + enqueueFunc := func(obj any) { enqueuedObjs = append(enqueuedObjs, obj) } client := fake.NewSimpleClientset(experiment) @@ -388,10 +388,10 @@ func TestInstanceIDRequirement(t *testing.T) { } func newObj(name, kind, apiVersion string) *unstructured.Unstructured { - obj := make(map[string]interface{}) + obj := make(map[string]any) obj["apiVersion"] = apiVersion obj["kind"] = kind - obj["metadata"] = map[string]interface{}{ + obj["metadata"] = map[string]any{ "name": name, "namespace": metav1.NamespaceDefault, } @@ -437,7 +437,7 @@ func TestProcessNextWatchObj(t *testing.T) { dInformer := dynamicinformers.NewDynamicSharedInformerFactory(client, func() time.Duration { return 0 }()) indexer := dInformer.ForResource(gvk).Informer().GetIndexer() indexer.AddIndexers(cache.Indexers{ - "testIndexer": func(obj interface{}) (strings []string, e error) { + "testIndexer": func(obj any) (strings []string, e error) { return []string{"default/foo"}, nil }, }) diff --git a/utils/defaults/defaults.go b/utils/defaults/defaults.go index 8c1c171219..f1c38dcef1 100644 --- a/utils/defaults/defaults.go +++ b/utils/defaults/defaults.go @@ -67,6 +67,8 @@ const ( var ( defaultVerifyTargetGroup = false + traefikAPIGroup = DefaultTraefikAPIGroup + traefikVersion = DefaultTraefikVersion istioAPIVersion = DefaultIstioVersion ambassadorAPIVersion = 
DefaultAmbassadorVersion smiAPIVersion = DefaultSMITrafficSplitVersion @@ -305,6 +307,22 @@ func GetSMIAPIVersion() string { return smiAPIVersion } +func SetTraefikVersion(apiVersion string) { + traefikVersion = apiVersion +} + +func GetTraefikVersion() string { + return traefikVersion +} + +func SetTraefikAPIGroup(apiGroup string) { + traefikAPIGroup = apiGroup +} + +func GetTraefikAPIGroup() string { + return traefikAPIGroup +} + func SetTargetGroupBindingAPIVersion(apiVersion string) { targetGroupBindingAPIVersion = apiVersion } diff --git a/utils/defaults/defaults_test.go b/utils/defaults/defaults_test.go index 1d981731c2..edd144d1f7 100644 --- a/utils/defaults/defaults_test.go +++ b/utils/defaults/defaults_test.go @@ -398,6 +398,16 @@ func TestSetDefaults(t *testing.T) { SetSMIAPIVersion(DefaultSMITrafficSplitVersion) assert.Equal(t, DefaultSMITrafficSplitVersion, GetSMIAPIVersion()) + SetTraefikAPIGroup("traefik.containo.us") + assert.Equal(t, "traefik.containo.us", GetTraefikAPIGroup()) + SetTraefikAPIGroup(DefaultTraefikAPIGroup) + assert.Equal(t, DefaultTraefikAPIGroup, GetTraefikAPIGroup()) + + SetTraefikVersion("traefik.containo.us/v1alpha1") + assert.Equal(t, "traefik.containo.us/v1alpha1", GetTraefikVersion()) + SetTraefikVersion(DefaultTraefikVersion) + assert.Equal(t, DefaultTraefikVersion, GetTraefikVersion()) + SetTargetGroupBindingAPIVersion("v1alpha9") assert.Equal(t, "v1alpha9", GetTargetGroupBindingAPIVersion()) SetTargetGroupBindingAPIVersion(DefaultTargetGroupBindingAPIVersion) diff --git a/utils/diff/diff.go b/utils/diff/diff.go index 458afe4654..12ac705070 100644 --- a/utils/diff/diff.go +++ b/utils/diff/diff.go @@ -7,7 +7,7 @@ import ( ) // CreateTwoWayMergePatch is a helper to construct a two-way merge patch from objects (instead of bytes) -func CreateTwoWayMergePatch(orig, new, dataStruct interface{}) ([]byte, bool, error) { +func CreateTwoWayMergePatch(orig, new, dataStruct any) ([]byte, bool, error) { origBytes, err := 
json.Marshal(orig) if err != nil { return nil, false, err diff --git a/utils/evaluate/evaluate.go b/utils/evaluate/evaluate.go index b725d99c0f..a2c9a607cd 100644 --- a/utils/evaluate/evaluate.go +++ b/utils/evaluate/evaluate.go @@ -14,7 +14,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" ) -func EvaluateResult(result interface{}, metric v1alpha1.Metric, logCtx logrus.Entry) (v1alpha1.AnalysisPhase, error) { +func EvaluateResult(result any, metric v1alpha1.Metric, logCtx logrus.Entry) (v1alpha1.AnalysisPhase, error) { successCondition := false failCondition := false var err error @@ -57,10 +57,10 @@ func EvaluateResult(result interface{}, metric v1alpha1.Metric, logCtx logrus.En } // EvalCondition evaluates the condition with the resultValue as an input -func EvalCondition(resultValue interface{}, condition string) (bool, error) { +func EvalCondition(resultValue any, condition string) (bool, error) { var err error - env := map[string]interface{}{ + env := map[string]any{ "result": valueFromPointer(resultValue), "asInt": asInt, "asFloat": asFloat, @@ -99,7 +99,7 @@ func isInf(f float64) bool { return math.IsInf(f, 0) } -func asInt(in interface{}) int64 { +func asInt(in any) int64 { switch i := in.(type) { case float64: return int64(i) @@ -135,7 +135,7 @@ func asInt(in interface{}) int64 { panic(fmt.Sprintf("asInt() not supported on %v %v", reflect.TypeOf(in), in)) } -func asFloat(in interface{}) float64 { +func asFloat(in any) float64 { switch i := in.(type) { case float64: return i @@ -188,8 +188,8 @@ func Equal(a, b []string) bool { return true } -func defaultFunc(resultValue interface{}) func(interface{}, interface{}) interface{} { - return func(_ interface{}, defaultValue interface{}) interface{} { +func defaultFunc(resultValue any) func(any, any) any { + return func(_ any, defaultValue any) any { if isNil(resultValue) { return defaultValue } @@ -197,14 +197,14 @@ func defaultFunc(resultValue interface{}) func(interface{}, 
interface{}) interfa } } -func isNilFunc(resultValue interface{}) func(interface{}) bool { - return func(_ interface{}) bool { +func isNilFunc(resultValue any) func(any) bool { + return func(_ any) bool { return isNil(resultValue) } } // isNil is courtesy of: https://gist.github.com/mangatmodi/06946f937cbff24788fa1d9f94b6b138 -func isNil(in interface{}) (out bool) { +func isNil(in any) (out bool) { if in == nil { out = true return @@ -220,7 +220,7 @@ func isNil(in interface{}) (out bool) { // valueFromPointer allows pointers to be passed in from the provider, but then extracts the value from // the pointer if the pointer is not nil, else returns nil -func valueFromPointer(in interface{}) (out interface{}) { +func valueFromPointer(in any) (out any) { if isNil(in) { return } diff --git a/utils/evaluate/evaluate_test.go b/utils/evaluate/evaluate_test.go index e058d2aa4b..7839e47256 100644 --- a/utils/evaluate/evaluate_test.go +++ b/utils/evaluate/evaluate_test.go @@ -126,11 +126,11 @@ func TestErrorWithInvalidReference(t *testing.T) { } func TestEvaluateArray(t *testing.T) { - floats := map[string]interface{}{ - "service_apdex": map[string]interface{}{ + floats := map[string]any{ + "service_apdex": map[string]any{ "label": nil, - "values": map[string]interface{}{ - "values": []interface{}{float64(2), float64(2)}, + "values": map[string]any{ + "values": []any{float64(2), float64(2)}, }, }, } @@ -169,7 +169,7 @@ func TestEvaluateAsIntPanic(t *testing.T) { func TestEvaluateAsInt(t *testing.T) { tests := []struct { - input interface{} + input any expression string expectation bool }{ @@ -186,7 +186,7 @@ func TestEvaluateAsInt(t *testing.T) { func TestEvaluateAsFloatError(t *testing.T) { tests := []struct { - input interface{} + input any expression string errRegexp string }{ @@ -203,7 +203,7 @@ func TestEvaluateAsFloatError(t *testing.T) { func TestEvaluateAsFloat(t *testing.T) { tests := []struct { - input interface{} + input any expression string expectation bool }{ 
diff --git a/utils/ingress/ingress_test.go b/utils/ingress/ingress_test.go index 7d6cecee63..ccbd3449f1 100644 --- a/utils/ingress/ingress_test.go +++ b/utils/ingress/ingress_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -587,12 +586,12 @@ func getNetworkingIngress() *networkingv1.Ingress { }, }, Status: networkingv1.IngressStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { IP: "127.0.0.1", Hostname: "localhost", - Ports: []corev1.PortStatus{ + Ports: []networkingv1.IngressPortStatus{ { Port: 8080, Protocol: "http", @@ -630,12 +629,12 @@ func getExtensionsIngress() *extensionsv1beta1.Ingress { }, }, Status: extensionsv1beta1.IngressStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + LoadBalancer: extensionsv1beta1.IngressLoadBalancerStatus{ + Ingress: []extensionsv1beta1.IngressLoadBalancerIngress{ { IP: "127.0.0.1", Hostname: "localhost", - Ports: []corev1.PortStatus{ + Ports: []extensionsv1beta1.IngressPortStatus{ { Port: 8080, Protocol: "http", diff --git a/utils/ingress/wrapper.go b/utils/ingress/wrapper.go index d70b6a96ed..c21bad82f7 100644 --- a/utils/ingress/wrapper.go +++ b/utils/ingress/wrapper.go @@ -6,7 +6,6 @@ import ( "sort" "sync" - corev1 "k8s.io/api/core/v1" "k8s.io/api/extensions/v1beta1" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -322,15 +321,19 @@ func (i *Ingress) GetNamespace() string { } } -func (i *Ingress) GetLoadBalancerStatus() corev1.LoadBalancerStatus { +func (i *Ingress) GetLoadBalancerHostnames() []string { + hostnames := []string{} switch i.mode { case IngressModeNetworking: - return 
i.ingress.Status.LoadBalancer + for _, ingress := range i.ingress.Status.LoadBalancer.Ingress { + hostnames = append(hostnames, ingress.Hostname) + } case IngressModeExtensions: - return i.legacyIngress.Status.LoadBalancer - default: - return corev1.LoadBalancerStatus{} + for _, ingress := range i.legacyIngress.Status.LoadBalancer.Ingress { + hostnames = append(hostnames, ingress.Hostname) + } } + return hostnames } func (i *Ingress) Mode() IngressMode { diff --git a/utils/ingress/wrapper_test.go b/utils/ingress/wrapper_test.go index 9e8f511494..0d8be79150 100644 --- a/utils/ingress/wrapper_test.go +++ b/utils/ingress/wrapper_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -450,30 +450,30 @@ func TestDeepCopy(t *testing.T) { }) } -func TestGetLoadBalancerStatus(t *testing.T) { - t.Run("will get loadbalancer status from wrapped networking.Ingress", func(t *testing.T) { +func TestGetLoadBalancerHostnames(t *testing.T) { + t.Run("will get loadbalancer hostnames from wrapped networking.Ingress", func(t *testing.T) { // given t.Parallel() i := getNetworkingIngress() ni := ingress.NewIngress(i) // when - lbs := ni.GetLoadBalancerStatus() + lbs := ni.GetLoadBalancerHostnames() // then - assert.Equal(t, i.Status.LoadBalancer, lbs) + assert.Equal(t, []string{"localhost"}, lbs) }) - t.Run("will get loadbalancer status from wrapped extensions.Ingress", func(t *testing.T) { + t.Run("will get loadbalancer hostnames from wrapped extensions.Ingress", func(t *testing.T) { // given t.Parallel() i := getExtensionsIngress() li := ingress.NewLegacyIngress(i) // when - lbs := li.GetLoadBalancerStatus() + lbs := li.GetLoadBalancerHostnames() // then - assert.Equal(t, i.Status.LoadBalancer, lbs) + assert.Equal(t, []string{"localhost"}, 
lbs) }) } @@ -914,12 +914,12 @@ func getNetworkingIngress() *v1.Ingress { }, }, Status: v1.IngressStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ { IP: "127.0.0.1", Hostname: "localhost", - Ports: []corev1.PortStatus{ + Ports: []networkingv1.IngressPortStatus{ { Port: 8080, Protocol: "http", @@ -954,12 +954,12 @@ func getExtensionsIngress() *v1beta1.Ingress { }, }, Status: v1beta1.IngressStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ + LoadBalancer: v1beta1.IngressLoadBalancerStatus{ + Ingress: []v1beta1.IngressLoadBalancerIngress{ { IP: "127.0.0.1", Hostname: "localhost", - Ports: []corev1.PortStatus{ + Ports: []v1beta1.IngressPortStatus{ { Port: 8080, Protocol: "http", diff --git a/utils/json/json.go b/utils/json/json.go index d946ff4621..96b2a3f7c7 100644 --- a/utils/json/json.go +++ b/utils/json/json.go @@ -10,7 +10,7 @@ import ( // MustMarshal marshals an object and panics if it failures. This function should only be used // when the object being passed in does not have any chance of failing (i.e. you constructed // the object yourself) -func MustMarshal(i interface{}) []byte { +func MustMarshal(i any) []byte { bytes, err := json.Marshal(i) if err != nil { panic(err) @@ -27,7 +27,7 @@ func (j *JSONMarshaler) ContentType() string { } // Marshal implements gwruntime.Marshaler. -func (j *JSONMarshaler) Marshal(v interface{}) ([]byte, error) { +func (j *JSONMarshaler) Marshal(v any) ([]byte, error) { return json.Marshal(v) } @@ -42,6 +42,6 @@ func (j *JSONMarshaler) NewEncoder(w io.Writer) gwruntime.Encoder { } // Unmarshal implements gwruntime.Marshaler. 
-func (j *JSONMarshaler) Unmarshal(data []byte, v interface{}) error { +func (j *JSONMarshaler) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) } diff --git a/utils/json/json_test.go b/utils/json/json_test.go index 2b3b734963..1f3a18e19f 100644 --- a/utils/json/json_test.go +++ b/utils/json/json_test.go @@ -23,7 +23,7 @@ type I interface { } func TestMustMarshalPanics(t *testing.T) { - test := map[string]interface{}{ + test := map[string]any{ "foo": make(chan int), } assert.Panics(t, func() { MustMarshal(test) }) diff --git a/utils/log/log.go b/utils/log/log.go index 049f26f935..86a391fd2e 100644 --- a/utils/log/log.go +++ b/utils/log/log.go @@ -74,7 +74,7 @@ func WithObject(obj runtime.Object) *log.Entry { // This is an optimization that callers can use to avoid inferring this again from a runtime.Object func KindNamespaceName(logCtx *log.Entry) (string, string, string) { var kind string - var nameIf interface{} + var nameIf any var ok bool if nameIf, ok = logCtx.Data["rollout"]; ok { kind = "Rollout" @@ -118,7 +118,7 @@ func WithRedactor(entry log.Entry, secrets []string) *log.Entry { } func WithVersionFields(entry *log.Entry, r *v1alpha1.Rollout) *log.Entry { - return entry.WithFields(map[string]interface{}{ + return entry.WithFields(map[string]any{ "resourceVersion": r.ResourceVersion, "generation": r.Generation, }) diff --git a/utils/plugin/downloader.go b/utils/plugin/downloader.go index b8b1ad6263..a67dcb38e5 100644 --- a/utils/plugin/downloader.go +++ b/utils/plugin/downloader.go @@ -43,12 +43,17 @@ func checkPluginExists(pluginLocation string) error { } func checkShaOfPlugin(pluginLocation string, expectedSha256 string) (bool, error) { - hasher := sha256.New() fileBytes, err := os.ReadFile(pluginLocation) if err != nil { return false, fmt.Errorf("failed to read file %s: %w", pluginLocation, err) } - fileSha256 := fmt.Sprintf("%x", hasher.Sum(fileBytes)) + var fileSha256 string + if len(expectedSha256) == 64 { + fileSha256 = 
fmt.Sprintf("%x", sha256.Sum256(fileBytes)) + } else { + hasher := sha256.New() + fileSha256 = fmt.Sprintf("%x", hasher.Sum(fileBytes)) + } match := fileSha256 == expectedSha256 if !match { log.Printf("expected sha256: %s, actual sha256: %s, of downloaded metric plugin (%s)", expectedSha256, fileSha256, pluginLocation) diff --git a/utils/plugin/downloader_test.go b/utils/plugin/downloader_test.go index 75dc4cae71..ab14be3385 100644 --- a/utils/plugin/downloader_test.go +++ b/utils/plugin/downloader_test.go @@ -47,7 +47,7 @@ func TestPlugin(t *testing.T) { Name: "argo-rollouts-config", Namespace: "argo-rollouts", }, - Data: map[string]string{"metricProviderPlugins": "\n - name: argoproj-labs/http\n location: https://test/plugin\n - name: argoproj-labs/http-sha\n location: https://test/plugin\n sha256: 74657374e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + Data: map[string]string{"metricProviderPlugins": "\n - name: argoproj-labs/http\n location: https://test/plugin\n - name: argoproj-labs/http-sha\n location: https://test/plugin\n sha256: 74657374e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n - name: argoproj-labs/http-sha-correct\n location: https://test/plugin\n sha256: 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"}, } client := fake.NewSimpleClientset(cm) diff --git a/utils/plugin/plugin.go b/utils/plugin/plugin.go index a57481adae..3fd7a013a0 100644 --- a/utils/plugin/plugin.go +++ b/utils/plugin/plugin.go @@ -9,26 +9,26 @@ import ( "github.com/argoproj/argo-rollouts/utils/config" ) -// GetPluginLocation returns the location of the plugin on the filesystem via plugin name. If the plugin is not +// GetPluginInfo returns the location & command arguments of the plugin on the filesystem via plugin name. If the plugin is not // configured in the configmap, an error is returned. 
-func GetPluginLocation(pluginName string) (string, error) { +func GetPluginInfo(pluginName string) (string, []string, error) { configMap, err := config.GetConfig() if err != nil { - return "", fmt.Errorf("failed to get config: %w", err) + return "", nil, fmt.Errorf("failed to get config: %w", err) } for _, item := range configMap.GetAllPlugins() { if pluginName == item.Name { dir, filename, err := config.GetPluginDirectoryAndFilename(item.Name) if err != nil { - return "", err + return "", nil, err } absFilePath, err := filepath.Abs(filepath.Join(defaults.DefaultRolloutPluginFolder, dir, filename)) if err != nil { - return "", fmt.Errorf("failed to get absolute path of plugin folder: %w", err) + return "", nil, fmt.Errorf("failed to get absolute path of plugin folder: %w", err) } - return absFilePath, nil + return absFilePath, item.Args, nil } } - return "", fmt.Errorf("plugin %s not configured in configmap", pluginName) + return "", nil, fmt.Errorf("plugin %s not configured in configmap", pluginName) } diff --git a/utils/plugin/plugin_test.go b/utils/plugin/plugin_test.go index 63e39285ab..7226d2d312 100644 --- a/utils/plugin/plugin_test.go +++ b/utils/plugin/plugin_test.go @@ -13,7 +13,8 @@ import ( "k8s.io/client-go/kubernetes/fake" ) -func TestGetPluginLocation(t *testing.T) { +func TestGetPluginInfo(t *testing.T) { + cmdArgs := []string{"-l 2"} t.Run("tests getting plugin location of metric provider plugins", func(t *testing.T) { cm := &v1.ConfigMap{ @@ -21,18 +22,22 @@ func TestGetPluginLocation(t *testing.T) { Name: "argo-rollouts-config", Namespace: "argo-rollouts", }, - Data: map[string]string{"metricProviderPlugins": "\n - name: argoproj-labs/http\n location: https://test/plugin\n - name: argoproj-labs/http-sha\n location: https://test/plugin\n sha256: 74657374e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + Data: map[string]string{"metricProviderPlugins": "\n - name: argoproj-labs/http\n location: https://test/plugin\n args: [\"-l 
2\"]\n - name: argoproj-labs/http-sha\n location: https://test/plugin\n sha256: 74657374e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, } client := fake.NewSimpleClientset(cm) _, err := config.InitializeConfig(client, "argo-rollouts-config") assert.NoError(t, err) - location, err := GetPluginLocation("argoproj-labs/http") + location, args, err := GetPluginInfo("argoproj-labs/http") assert.NoError(t, err) fp, err := filepath.Abs(filepath.Join(defaults.DefaultRolloutPluginFolder, "argoproj-labs/http")) assert.NoError(t, err) assert.Equal(t, fp, location) + assert.Equal(t, args, cmdArgs) + + _, args, _ = GetPluginInfo("argoproj-labs/http-sha") + assert.Equal(t, len(args), 0) }) t.Run("tests getting plugin location of traffic router plugins", func(t *testing.T) { @@ -42,18 +47,22 @@ func TestGetPluginLocation(t *testing.T) { Name: "argo-rollouts-config", Namespace: "argo-rollouts", }, - Data: map[string]string{"trafficRouterPlugins": "\n - name: argoproj-labs/router\n location: https://test/plugin\n - name: argoproj-labs/router-sha\n location: https://test/plugin\n sha256: 74657374e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + Data: map[string]string{"trafficRouterPlugins": "\n - name: argoproj-labs/router\n location: https://test/plugin\n args: [\"-l 2\"]\n - name: argoproj-labs/router-sha\n location: https://test/plugin\n sha256: 74657374e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, } client := fake.NewSimpleClientset(cm) _, err := config.InitializeConfig(client, "argo-rollouts-config") assert.NoError(t, err) - location, err := GetPluginLocation("argoproj-labs/router") + location, args, err := GetPluginInfo("argoproj-labs/router") assert.NoError(t, err) fp, err := filepath.Abs(filepath.Join(defaults.DefaultRolloutPluginFolder, "argoproj-labs/router")) assert.NoError(t, err) assert.Equal(t, fp, location) + assert.Equal(t, args, cmdArgs) + + _, args, _ = GetPluginInfo("argoproj-labs/router-sha") + 
assert.Equal(t, len(args), 0) }) t.Run("test getting plugin location of a plugin that does not exists", func(t *testing.T) { @@ -69,9 +78,10 @@ func TestGetPluginLocation(t *testing.T) { _, err := config.InitializeConfig(client, "argo-rollouts-config") assert.NoError(t, err) - location, err := GetPluginLocation("does-not-exist") + location, args, err := GetPluginInfo("does-not-exist") assert.Error(t, err) assert.Equal(t, "plugin does-not-exist not configured in configmap", err.Error()) assert.Equal(t, "", location) + assert.Equal(t, len(args), 0) }) } diff --git a/utils/plugin/types/types.go b/utils/plugin/types/types.go index 102af1490a..e89ba40f2a 100644 --- a/utils/plugin/types/types.go +++ b/utils/plugin/types/types.go @@ -102,4 +102,7 @@ type PluginItem struct { Name string `json:"name" yaml:"name"` Location string `json:"location" yaml:"location"` Sha256 string `json:"sha256" yaml:"sha256"` + + // Args holds command line arguments + Args []string `json:"args" yaml:"args"` } diff --git a/utils/record/record.go b/utils/record/record.go index ef0cf23b17..96be7c8582 100644 --- a/utils/record/record.go +++ b/utils/record/record.go @@ -7,11 +7,13 @@ import ( "encoding/json" "fmt" "regexp" + "sort" "strings" + "sync" "time" + argoinformers "github.com/argoproj/argo-rollouts/pkg/client/informers/externalversions/rollouts/v1alpha1" timeutil "github.com/argoproj/argo-rollouts/utils/time" - "github.com/argoproj/notifications-engine/pkg/api" "github.com/argoproj/notifications-engine/pkg/services" "github.com/argoproj/notifications-engine/pkg/subscriptions" @@ -20,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" k8sinformers "k8s.io/client-go/informers" @@ -32,6 +35,7 @@ import ( "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" rolloutscheme 
"github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/scheme" + "github.com/argoproj/argo-rollouts/utils/annotations" logutil "github.com/argoproj/argo-rollouts/utils/log" ) @@ -58,8 +62,8 @@ type EventOptions struct { } type EventRecorder interface { - Eventf(object runtime.Object, opts EventOptions, messageFmt string, args ...interface{}) - Warnf(object runtime.Object, opts EventOptions, messageFmt string, args ...interface{}) + Eventf(object runtime.Object, opts EventOptions, messageFmt string, args ...any) + Warnf(object runtime.Object, opts EventOptions, messageFmt string, args ...any) K8sRecorder() record.EventRecorder } @@ -75,7 +79,7 @@ type EventRecorderAdapter struct { NotificationSuccessCounter *prometheus.CounterVec NotificationSendPerformance *prometheus.HistogramVec - eventf func(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...interface{}) + eventf func(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...any) // apiFactory is a notifications engine API factory apiFactory api.Factory } @@ -104,14 +108,36 @@ func NewEventRecorder(kubeclientset kubernetes.Interface, rolloutEventCounter *p // reasons which were emitted type FakeEventRecorder struct { EventRecorderAdapter - Events []string + // acquire eventsLock before using events + events []string + eventsLock sync.Mutex +} + +func (e *FakeEventRecorder) appendEvents(events ...string) { + e.eventsLock.Lock() + defer e.eventsLock.Unlock() + + e.events = append(e.events, events...) +} + +// Events returns a list of received events, with thread safety +func (e *FakeEventRecorder) Events() []string { + + e.eventsLock.Lock() + defer e.eventsLock.Unlock() + + if e.events == nil { + return nil + } + + return append(make([]string, 0), e.events...) 
} func NewFakeApiFactory() api.Factory { var ( settings = api.Settings{ConfigMapName: "my-config-map", SecretName: "my-secret", InitGetVars: func(cfg *api.Config, configMap *corev1.ConfigMap, secret *corev1.Secret) (api.GetVars, error) { - return func(obj map[string]interface{}, dest services.Destination) map[string]interface{} { - return map[string]interface{}{"obj": obj} + return func(obj map[string]any, dest services.Destination) map[string]any { + return map[string]any{"obj": obj} }, nil }} ) @@ -173,29 +199,29 @@ func NewFakeEventRecorder() *FakeEventRecorder { ).(*EventRecorderAdapter) recorder.Recorder = record.NewFakeRecorder(1000) fakeRecorder := &FakeEventRecorder{} - recorder.eventf = func(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...interface{}) { + recorder.eventf = func(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...any) { recorder.defaultEventf(object, warn, opts, messageFmt, args...) - fakeRecorder.Events = append(fakeRecorder.Events, opts.EventReason) + fakeRecorder.appendEvents(opts.EventReason) } fakeRecorder.EventRecorderAdapter = *recorder return fakeRecorder } -func (e *EventRecorderAdapter) Eventf(object runtime.Object, opts EventOptions, messageFmt string, args ...interface{}) { +func (e *EventRecorderAdapter) Eventf(object runtime.Object, opts EventOptions, messageFmt string, args ...any) { if opts.EventType == "" { opts.EventType = corev1.EventTypeNormal } e.eventf(object, opts.EventType == corev1.EventTypeWarning, opts, messageFmt, args...) } -func (e *EventRecorderAdapter) Warnf(object runtime.Object, opts EventOptions, messageFmt string, args ...interface{}) { +func (e *EventRecorderAdapter) Warnf(object runtime.Object, opts EventOptions, messageFmt string, args ...any) { opts.EventType = corev1.EventTypeWarning e.eventf(object, true, opts, messageFmt, args...) 
} // defaultEventf is the default implementation of eventf, which is able to be overwritten for // test purposes -func (e *EventRecorderAdapter) defaultEventf(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...interface{}) { +func (e *EventRecorderAdapter) defaultEventf(object runtime.Object, warn bool, opts EventOptions, messageFmt string, args ...any) { logCtx := logutil.WithObject(object) if opts.EventReason != "" { @@ -208,20 +234,22 @@ func (e *EventRecorderAdapter) defaultEventf(object runtime.Object, warn bool, o e.RolloutEventCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc() } - apis, err := e.apiFactory.GetAPIsFromNamespace(namespace) - if err != nil { - logCtx.Errorf("notifications failed to get apis for eventReason %s with error: %s", opts.EventReason, err) - e.NotificationFailedCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc() - } - - for _, api := range apis { - err := e.sendNotifications(api, object, opts) + if e.apiFactory != nil { + apis, err := e.apiFactory.GetAPIsFromNamespace(namespace) if err != nil { - logCtx.Errorf("Notifications failed to send for eventReason %s with error: %s", opts.EventReason, err) + logCtx.Errorf("notifications failed to get apis for eventReason %s with error: %s", opts.EventReason, err) + e.NotificationFailedCounter.WithLabelValues(namespace, name, opts.EventType, opts.EventReason).Inc() + } + + for _, api := range apis { + err := e.sendNotifications(api, object, opts) + if err != nil { + logCtx.Errorf("Notifications failed to send for eventReason %s with error: %s", opts.EventReason, err) + } } } - } + } logFn := logCtx.Infof if warn { logFn = logCtx.Warnf @@ -233,13 +261,92 @@ func (e *EventRecorderAdapter) K8sRecorder() record.EventRecorder { return e.Recorder } -func NewAPIFactorySettings() api.Settings { +func getAnalysisRunsFilterWithLabels(ro v1alpha1.Rollout, arInformer argoinformers.AnalysisRunInformer) (any, error) { + + 
set := labels.Set(map[string]string{ + v1alpha1.DefaultRolloutUniqueLabelKey: ro.Status.CurrentPodHash, + }) + + revision, _ := annotations.GetRevisionAnnotation(&ro) + ars, err := arInformer.Lister().AnalysisRuns(ro.Namespace).List(labels.SelectorFromSet(set)) + if err != nil { + return nil, fmt.Errorf("error getting analysisruns from informer for namespace: %s error: %w", ro.Namespace, err) + } + if len(ars) == 0 { + return nil, nil + } + + filteredArs := make([]*v1alpha1.AnalysisRun, 0, len(ars)) + for _, ar := range ars { + arRevision, _ := annotations.GetRevisionAnnotation(ar) + if arRevision == revision { + filteredArs = append(filteredArs, ar) + } + } + + sort.Slice(filteredArs, func(i, j int) bool { + ts1 := filteredArs[i].ObjectMeta.CreationTimestamp.Time + ts2 := filteredArs[j].ObjectMeta.CreationTimestamp.Time + return ts1.After(ts2) + }) + + var arsObj any + arBytes, err := json.Marshal(filteredArs) + + if err != nil { + return nil, fmt.Errorf("Failed to marshal analysisRuns for rollout revision: %s, err: %w", string(revision), err) + } + + err = json.Unmarshal(arBytes, &arsObj) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshal analysisRuns for rollout revision: %s, err: %w", string(revision), err) + } + + return arsObj, nil +} + +func NewAPIFactorySettings(arInformer argoinformers.AnalysisRunInformer) api.Settings { return api.Settings{ SecretName: NotificationSecret, ConfigMapName: NotificationConfigMap, InitGetVars: func(cfg *api.Config, configMap *corev1.ConfigMap, secret *corev1.Secret) (api.GetVars, error) { - return func(obj map[string]interface{}, dest services.Destination) map[string]interface{} { - return map[string]interface{}{"rollout": obj, "time": timeExprs} + return func(obj map[string]any, dest services.Destination) map[string]any { + + var vars = map[string]any{ + "rollout": obj, + "time": timeExprs, + "secrets": secret.Data, + } + + if arInformer == nil { + log.Infof("Notification is not set for analysisRun Informer: %s", 
dest) + return vars + } + + var ro v1alpha1.Rollout + err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj, &ro) + + if err != nil { + log.Errorf("unable to send notification: bad rollout object: %v", err) + return vars + } + + arsObj, err := getAnalysisRunsFilterWithLabels(ro, arInformer) + + if err != nil { + log.Errorf("Error calling getAnalysisRunsFilterWithLabels for namespace: %s", + ro.Namespace) + return vars + + } + + vars = map[string]any{ + "rollout": obj, + "analysisRuns": arsObj, + "time": timeExprs, + "secrets": secret.Data, + } + return vars }, nil }, } @@ -331,12 +438,12 @@ func hash(input string) string { } // toObjectMap converts an object to a map for the purposes of sending to the notification engine -func toObjectMap(object interface{}) (map[string]interface{}, error) { +func toObjectMap(object any) (map[string]any, error) { objBytes, err := json.Marshal(object) if err != nil { return nil, err } - var objMap map[string]interface{} + var objMap map[string]any err = json.Unmarshal(objBytes, &objMap) if err != nil { return nil, err @@ -350,7 +457,7 @@ func toObjectMap(object interface{}) (map[string]interface{}, error) { if err != nil { return nil, err } - var templateMap map[string]interface{} + var templateMap map[string]any err = json.Unmarshal(templateBytes, &templateMap) if err != nil { return nil, err @@ -364,7 +471,7 @@ func toObjectMap(object interface{}) (map[string]interface{}, error) { if err != nil { return nil, err } - var selectorMap map[string]interface{} + var selectorMap map[string]any err = json.Unmarshal(selectorBytes, &selectorMap) if err != nil { return nil, err @@ -385,7 +492,7 @@ func translateReasonToTrigger(reason string) string { return "on-" + strings.ToLower(trigger) } -var timeExprs = map[string]interface{}{ +var timeExprs = map[string]any{ "Parse": parse, "Now": now, } diff --git a/utils/record/record_test.go b/utils/record/record_test.go index 97650bae46..dfea598eae 100644 --- 
a/utils/record/record_test.go +++ b/utils/record/record_test.go @@ -2,6 +2,7 @@ package record import ( "bytes" + "encoding/json" "errors" "fmt" "strings" @@ -9,7 +10,11 @@ import ( "time" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + argofake "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake" + argoinformersfactory "github.com/argoproj/argo-rollouts/pkg/client/informers/externalversions" + argoinformers "github.com/argoproj/argo-rollouts/pkg/client/informers/externalversions/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" + timeutil "github.com/argoproj/argo-rollouts/utils/time" "github.com/argoproj/notifications-engine/pkg/api" notificationapi "github.com/argoproj/notifications-engine/pkg/api" "github.com/argoproj/notifications-engine/pkg/mocks" @@ -21,13 +26,19 @@ import ( dto "github.com/prometheus/client_model/go" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" ) +var ( + noResyncPeriodFunc = func() time.Duration { return 0 } +) + func TestRecordLog(t *testing.T) { prevOutput := log.StandardLogger().Out defer func() { @@ -86,7 +97,7 @@ func TestIncCounter(t *testing.T) { buf := dto.Metric{} m.Write(&buf) assert.Equal(t, float64(3), *buf.Counter.Value) - assert.Equal(t, []string{"FooReason", "FooReason", "FooReason"}, rec.Events) + assert.Equal(t, []string{"FooReason", "FooReason", "FooReason"}, rec.Events()) } func TestSendNotifications(t *testing.T) { @@ -178,13 +189,18 @@ func TestSendNotificationsWhenConditionTime(t *testing.T) { k8sClient := fake.NewSimpleClientset() sharedInformers := informers.NewSharedInformerFactory(k8sClient, 0) + + f := argofake.NewSimpleClientset() + rolloutsI := 
argoinformersfactory.NewSharedInformerFactory(f, noResyncPeriodFunc()) + arInformer := rolloutsI.Argoproj().V1alpha1().AnalysisRuns() + cmInformer := sharedInformers.Core().V1().ConfigMaps().Informer() secretInformer := sharedInformers.Core().V1().Secrets().Informer() secretInformer.GetIndexer().Add(secret) cmInformer.GetIndexer().Add(cm) - apiFactory := notificationapi.NewFactory(NewAPIFactorySettings(), defaults.Namespace(), secretInformer, cmInformer) + apiFactory := notificationapi.NewFactory(NewAPIFactorySettings(arInformer), defaults.Namespace(), secretInformer, cmInformer) api, err := apiFactory.GetAPI() assert.NoError(t, err) @@ -224,8 +240,11 @@ func TestSendNotificationsWhenConditionTime(t *testing.T) { secretInformer.GetIndexer().Add(secret) cmInformer.GetIndexer().Add(cm) + f := argofake.NewSimpleClientset() + rolloutsI := argoinformersfactory.NewSharedInformerFactory(f, noResyncPeriodFunc()) + arInformer := rolloutsI.Argoproj().V1alpha1().AnalysisRuns() - apiFactory := notificationapi.NewFactory(NewAPIFactorySettings(), defaults.Namespace(), secretInformer, cmInformer) + apiFactory := notificationapi.NewFactory(NewAPIFactorySettings(arInformer), defaults.Namespace(), secretInformer, cmInformer) api, err := apiFactory.GetAPI() assert.NoError(t, err) @@ -298,7 +317,13 @@ func TestNotificationSendPerformance(t *testing.T) { if err != nil { t.Fatalf("error: %v", err) } - log.Infof("mfs: %v, %v, %v, %v", *mfs[0], *mfs[0].Metric[0].Histogram.SampleCount, *mfs[0].Metric[0].Histogram.SampleSum, *mfs[0].Metric[0].Histogram.Bucket[0].CumulativeCount) + t.Logf( + "mfs: %s, %v, %v, %v", + mfs[0].GetName(), + mfs[0].GetMetric()[0].GetHistogram().GetSampleCount(), + mfs[0].GetMetric()[0].GetHistogram().GetSampleSum(), + mfs[0].GetMetric()[0].GetHistogram().GetBucket()[0].GetCumulativeCount(), + ) want := `# HELP notification_send_performance Notification send performance. 
# TYPE notification_send_performance histogram notification_send_performance_bucket{name="guestbook",namespace="default",le="0.01"} 0 @@ -422,17 +447,178 @@ func TestSendNotificationsNoTrigger(t *testing.T) { assert.Len(t, err, 1) } +func createAnalysisRunInformer(ars []*v1alpha1.AnalysisRun) argoinformers.AnalysisRunInformer { + f := argofake.NewSimpleClientset() + rolloutsI := argoinformersfactory.NewSharedInformerFactory(f, noResyncPeriodFunc()) + arInformer := rolloutsI.Argoproj().V1alpha1().AnalysisRuns() + for _, ar := range ars { + _ = arInformer.Informer().GetStore().Add(ar) + } + return arInformer +} + func TestNewAPIFactorySettings(t *testing.T) { - settings := NewAPIFactorySettings() - assert.Equal(t, NotificationConfigMap, settings.ConfigMapName) - assert.Equal(t, NotificationSecret, settings.SecretName) - getVars, err := settings.InitGetVars(nil, nil, nil) - assert.NoError(t, err) - rollout := map[string]interface{}{"name": "hello"} - vars := getVars(rollout, services.Destination{}) + ars := []*v1alpha1.AnalysisRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "analysis-run-1", + CreationTimestamp: metav1.NewTime(timeutil.Now().Add(-1 * time.Hour)), + Namespace: "default", + Labels: map[string]string{"rollouts-pod-template-hash": "85659df978"}, + Annotations: map[string]string{"rollout.argoproj.io/revision": "1"}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "analysis-run-2", + CreationTimestamp: metav1.NewTime(timeutil.Now().Add(-2 * time.Hour)), + Namespace: "default", + Labels: map[string]string{"rollouts-pod-template-hash": "85659df978"}, + Annotations: map[string]string{"rollout.argoproj.io/revision": "1"}, + }, + }, + } + ro := v1alpha1.Rollout{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rollout", + Namespace: "default", + Annotations: map[string]string{"rollout.argoproj.io/revision": "1"}, + }, + Status: v1alpha1.RolloutStatus{ + CurrentPodHash: "85659df978", + }, + } + + expectedSecrets := map[string][]byte{ + "notification-secret": 
[]byte("secret-value"), + } - assert.Equal(t, map[string]interface{}{"rollout": rollout, "time": timeExprs}, vars) + notificationsSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "argo-rollouts-notification-secret", + Namespace: "default", + }, + Data: expectedSecrets, + } + + type expectedFunc func(obj map[string]interface{}, ar any) map[string]interface{} + type arInformerFunc func([]*v1alpha1.AnalysisRun) argoinformers.AnalysisRunInformer + + testcase := []struct { + name string + arInformer arInformerFunc + rollout v1alpha1.Rollout + ars []*v1alpha1.AnalysisRun + expected expectedFunc + }{ + { + name: "Send notification with rollout and analysisRun", + arInformer: func(ars []*v1alpha1.AnalysisRun) argoinformers.AnalysisRunInformer { + return createAnalysisRunInformer(ars) + }, + rollout: ro, + ars: ars, + expected: func(obj map[string]interface{}, ar any) map[string]interface{} { + return map[string]interface{}{ + "rollout": obj, + "analysisRuns": ar, + "time": timeExprs, + "secrets": expectedSecrets, + } + }, + }, + { + name: "Send notification rollout when revision and label doesn't match", + arInformer: func(ars []*v1alpha1.AnalysisRun) argoinformers.AnalysisRunInformer { + return createAnalysisRunInformer(ars) + }, + rollout: ro, + ars: []*v1alpha1.AnalysisRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "analysis-run-3", + CreationTimestamp: metav1.NewTime(timeutil.Now().Add(-2 * time.Hour)), + Namespace: "default", + Labels: map[string]string{"rollouts-pod-template-hash": "1234"}, + Annotations: map[string]string{"rollout.argoproj.io/revision": "2"}, + }, + }, + }, + expected: func(obj map[string]interface{}, ar any) map[string]interface{} { + return map[string]interface{}{ + "rollout": obj, + "analysisRuns": nil, + "time": timeExprs, + "secrets": expectedSecrets, + } + }, + }, + { + name: "arInformer is nil", + arInformer: func(ars []*v1alpha1.AnalysisRun) argoinformers.AnalysisRunInformer { + return nil + }, + rollout: ro, + ars: nil, 
+ expected: func(obj map[string]interface{}, ar any) map[string]interface{} { + return map[string]interface{}{ + "rollout": obj, + "time": timeExprs, + "secrets": expectedSecrets, + } + }, + }, + { + name: "analysisRuns nil for no matching namespace", + arInformer: func(ars []*v1alpha1.AnalysisRun) argoinformers.AnalysisRunInformer { + return createAnalysisRunInformer(ars) + }, + rollout: ro, + ars: []*v1alpha1.AnalysisRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "analysis-run-1", + CreationTimestamp: metav1.NewTime(timeutil.Now().Add(-2 * time.Hour)), + Namespace: "default-1", + Labels: map[string]string{"rollouts-pod-template-hash": "1234"}, + Annotations: map[string]string{"rollout.argoproj.io/revision": "2"}, + }, + }, + }, + expected: func(obj map[string]interface{}, ar any) map[string]interface{} { + return map[string]interface{}{ + "rollout": obj, + "analysisRuns": nil, + "time": timeExprs, + "secrets": expectedSecrets, + } + }, + }, + } + + for _, test := range testcase { + t.Run(test.name, func(t *testing.T) { + + settings := NewAPIFactorySettings(test.arInformer(test.ars)) + getVars, err := settings.InitGetVars(nil, nil, ¬ificationsSecret) + require.NoError(t, err) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + obj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(&test.rollout) + + arBytes, err := json.Marshal(test.ars) + var arsObj any + _ = json.Unmarshal(arBytes, &arsObj) + vars := getVars(obj, services.Destination{}) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + assert.Equal(t, test.expected(obj, arsObj), vars) + }) + } } func TestWorkloadRefObjectMap(t *testing.T) { diff --git a/utils/replicaset/canary.go b/utils/replicaset/canary.go old mode 100755 new mode 100644 index 40eadb7848..f8fd8fe869 --- a/utils/replicaset/canary.go +++ b/utils/replicaset/canary.go @@ -4,17 +4,20 @@ import ( "encoding/json" "math" + "github.com/argoproj/argo-rollouts/utils/annotations" + log "github.com/sirupsen/logrus" 
appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/argoproj/argo-rollouts/utils/defaults" + "github.com/argoproj/argo-rollouts/utils/weightutil" ) const ( // EphemeralMetadataAnnotation denotes pod metadata which is ephemerally injected to canary/stable pods - EphemeralMetadataAnnotation = "rollout.argoproj.io/ephemeral-metadata" + EphemeralMetadataAnnotation = annotations.RolloutLabel + "/ephemeral-metadata" ) func allDesiredAreAvailable(rs *appsv1.ReplicaSet, desired int32) bool { @@ -56,11 +59,12 @@ func AtDesiredReplicaCountsForCanary(ro *v1alpha1.Rollout, newRS, stableRS *apps // when using the basic canary strategy. The function calculates the desired number of replicas for // the new and stable RS using the following equations: // -// newRS Replica count = spec.Replica * (setweight / 100) -// stableRS Replica count = spec.Replica * (1 - setweight / 100) +// desired newRS Replica count = spec.Replica * (setweight / maxweight) +// desired stableRS Replica count = spec.Replica - newRS +// +// The function for newRS finds the closest whole number of replicas based on the weight percentage +// and rounds up the desired replica count in case of a tie. // -// In both equations, the function rounds the desired replica count up if the math does not divide into whole numbers -// because the rollout guarantees at least one replica for both the stable and new RS when the setWeight is not 0 or 100. 
// Then, the function finds the number of replicas it can scale up using the following equation: // // scaleUpCount := (maxSurge + rollout.Spec.Replica) - sum of rollout's RSs spec.Replica @@ -91,8 +95,9 @@ func CalculateReplicaCountsForBasicCanary(rollout *v1alpha1.Rollout, newRS *apps rolloutSpecReplica := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) _, desiredWeight := GetCanaryReplicasOrWeight(rollout) maxSurge := MaxSurge(rollout) + maxWeight := weightutil.MaxTrafficWeight(rollout) - desiredNewRSReplicaCount, desiredStableRSReplicaCount := approximateWeightedCanaryStableReplicaCounts(rolloutSpecReplica, desiredWeight, maxSurge) + desiredNewRSReplicaCount, desiredStableRSReplicaCount := approximateWeightedCanaryStableReplicaCounts(rolloutSpecReplica, desiredWeight, maxWeight, maxSurge) stableRSReplicaCount := int32(0) newRSReplicaCount := int32(0) @@ -180,7 +185,7 @@ func CalculateReplicaCountsForBasicCanary(rollout *v1alpha1.Rollout, newRS *apps // canary/stable replica counts might sum to either spec.replicas or spec.replicas + 1 but will not // exceed spec.replicas if maxSurge is 0. If the canary weight is between 1-99, and spec.replicas is > 1, // we will always return a minimum of 1 for stable and canary as to not return 0. 
-func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, maxSurge int32) (int32, int32) { +func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, maxWeight, maxSurge int32) (int32, int32) { if specReplicas == 0 { return 0, 0 } @@ -192,14 +197,14 @@ func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, m } var options []canaryOption - ceilWeightedCanaryCount := int32(math.Ceil(float64(specReplicas*desiredWeight) / 100.0)) - floorWeightedCanaryCount := int32(math.Floor(float64(specReplicas*desiredWeight) / 100.0)) + ceilWeightedCanaryCount := int32(math.Ceil(float64(specReplicas*desiredWeight) / float64(maxWeight))) + floorWeightedCanaryCount := int32(math.Floor(float64(specReplicas*desiredWeight) / float64(maxWeight))) - tied := floorCeilingTied(desiredWeight, specReplicas) + tied := floorCeilingTied(desiredWeight, maxWeight, specReplicas) // zeroAllowed indicates if are allowed to return the floored value if it is zero. 
We don't allow // the value to be zero if when user has a weight from 1-99, and they run 2+ replicas (surge included) - zeroAllowed := desiredWeight == 100 || desiredWeight == 0 || (specReplicas == 1 && maxSurge == 0) + zeroAllowed := desiredWeight == (maxWeight) || desiredWeight == 0 || (specReplicas == 1 && maxSurge == 0) if ceilWeightedCanaryCount < specReplicas || zeroAllowed { options = append(options, canaryOption{ceilWeightedCanaryCount, specReplicas}) @@ -213,7 +218,7 @@ func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, m // in order to achieve a closer canary weight if maxSurge > 0 { options = append(options, canaryOption{ceilWeightedCanaryCount, specReplicas + 1}) - surgeIsTied := floorCeilingTied(desiredWeight, specReplicas+1) + surgeIsTied := floorCeilingTied(desiredWeight, maxWeight, specReplicas+1) if !surgeIsTied && (floorWeightedCanaryCount != 0 || zeroAllowed) { options = append(options, canaryOption{floorWeightedCanaryCount, specReplicas + 1}) } @@ -225,10 +230,10 @@ func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, m } bestOption := options[0] - bestDelta := weightDelta(desiredWeight, bestOption.canary, bestOption.total) + bestDelta := weightDelta(desiredWeight, maxWeight, bestOption.canary, bestOption.total) for i := 1; i < len(options); i++ { currOption := options[i] - currDelta := weightDelta(desiredWeight, currOption.canary, currOption.total) + currDelta := weightDelta(desiredWeight, maxWeight, currOption.canary, currOption.total) if currDelta < bestDelta { bestOption = currOption bestDelta = currDelta @@ -241,15 +246,15 @@ func approximateWeightedCanaryStableReplicaCounts(specReplicas, desiredWeight, m // For example: replicas: 3, desiredWeight: 50% // A canary count of 1 (33.33%) or 2 (66.66%) are both equidistant from desired weight of 50%. 
// When this happens, we will pick the larger canary count -func floorCeilingTied(desiredWeight, totalReplicas int32) bool { - _, frac := math.Modf(float64(totalReplicas) * (float64(desiredWeight) / 100)) +func floorCeilingTied(desiredWeight, maxWeight, totalReplicas int32) bool { + _, frac := math.Modf(float64(totalReplicas) * (float64(desiredWeight) / float64(maxWeight))) return frac == 0.5 } // weightDelta calculates the difference that the canary replicas will be from the desired weight // This is used to pick the closest approximation of canary counts. -func weightDelta(desiredWeight, canaryReplicas, totalReplicas int32) float64 { - actualWeight := float64(canaryReplicas*100) / float64(totalReplicas) +func weightDelta(desiredWeight, maxWeight, canaryReplicas, totalReplicas int32) float64 { + actualWeight := float64(canaryReplicas) * float64(maxWeight) / float64(totalReplicas) return math.Abs(actualWeight - float64(desiredWeight)) } @@ -337,11 +342,12 @@ func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, wei var canaryCount, stableCount int32 rolloutSpecReplica := defaults.GetReplicasOrDefault(rollout.Spec.Replicas) setCanaryScaleReplicas, desiredWeight := GetCanaryReplicasOrWeight(rollout) + maxWeight := weightutil.MaxTrafficWeight(rollout) if setCanaryScaleReplicas != nil { // a canary count was explicitly set canaryCount = *setCanaryScaleReplicas } else { - canaryCount = CheckMinPodsPerReplicaSet(rollout, trafficWeightToReplicas(rolloutSpecReplica, desiredWeight)) + canaryCount = CheckMinPodsPerReplicaSet(rollout, trafficWeightToReplicas(rolloutSpecReplica, desiredWeight, maxWeight)) } if !rollout.Spec.Strategy.Canary.DynamicStableScale { @@ -357,9 +363,10 @@ func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, wei // high, until we reduce traffic to it. // Case 2 occurs when we are going from high to low canary weight.
In this scenario, // we need to increase the stable scale in preparation for increase of traffic to stable. - stableCount = trafficWeightToReplicas(rolloutSpecReplica, 100-desiredWeight) + // TODO calculate the replica set count from the max traffic weight. + stableCount = trafficWeightToReplicas(rolloutSpecReplica, maxWeight-desiredWeight, maxWeight) if weights != nil { - actualStableWeightReplicaCount := trafficWeightToReplicas(rolloutSpecReplica, weights.Stable.Weight) + actualStableWeightReplicaCount := trafficWeightToReplicas(rolloutSpecReplica, weights.Stable.Weight, maxWeight) stableCount = max(stableCount, actualStableWeightReplicaCount) if rollout.Status.Abort { @@ -368,7 +375,7 @@ func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, wei // 1. actual canary traffic weight // 2. desired canary traffic weight // This if block makes sure we don't scale down the canary prematurely - trafficWeightReplicaCount := trafficWeightToReplicas(rolloutSpecReplica, weights.Canary.Weight) + trafficWeightReplicaCount := trafficWeightToReplicas(rolloutSpecReplica, weights.Canary.Weight, maxWeight) canaryCount = max(trafficWeightReplicaCount, canaryCount) } } @@ -377,8 +384,8 @@ func CalculateReplicaCountsForTrafficRoutedCanary(rollout *v1alpha1.Rollout, wei // trafficWeightToReplicas returns the appropriate replicas given the full spec.replicas and a weight // Rounds up if not evenly divisible. 
-func trafficWeightToReplicas(replicas, weight int32) int32 { - return int32(math.Ceil(float64(weight*replicas) / 100)) +func trafficWeightToReplicas(replicas, weight, maxWeight int32) int32 { + return int32(math.Ceil(float64(weight) * float64(replicas) / float64(maxWeight))) } func max(left, right int32) int32 { @@ -459,7 +466,7 @@ func GetCurrentCanaryStep(rollout *v1alpha1.Rollout) (*v1alpha1.CanaryStep, *int // GetCanaryReplicasOrWeight either returns a static set of replicas or a weight percentage func GetCanaryReplicasOrWeight(rollout *v1alpha1.Rollout) (*int32, int32) { if rollout.Status.PromoteFull || rollout.Status.StableRS == "" || rollout.Status.CurrentPodHash == rollout.Status.StableRS { - return nil, 100 + return nil, weightutil.MaxTrafficWeight(rollout) } if scs := UseSetCanaryScale(rollout); scs != nil { if scs.Replicas != nil { @@ -480,7 +487,7 @@ func GetCurrentSetWeight(rollout *v1alpha1.Rollout) int32 { } currentStep, currentStepIndex := GetCurrentCanaryStep(rollout) if currentStep == nil { - return 100 + return weightutil.MaxTrafficWeight(rollout) } for i := *currentStepIndex; i >= 0; i-- { diff --git a/utils/replicaset/canary_test.go b/utils/replicaset/canary_test.go index 375ca43ce4..dbe4ce5a03 100755 --- a/utils/replicaset/canary_test.go +++ b/utils/replicaset/canary_test.go @@ -276,7 +276,7 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { expectedCanaryReplicaCount: 10, }, { - name: "Do not scale newRS down to zero on non-zero weight", + name: "Do not scale canaryRS down to zero on non-zero weight", rolloutSpecReplicas: 1, setWeight: 20, maxSurge: intstr.FromInt(1), @@ -308,7 +308,55 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { expectedCanaryReplicaCount: 1, }, { - name: "Scale up Stable before newRS", + name: "Scale canaryRS to zero on <50 weight without surge", + rolloutSpecReplicas: 1, + setWeight: 49, + maxSurge: intstr.FromInt(0), + maxUnavailable: intstr.FromInt(1), + + stableSpecReplica: 1, + 
stableAvailableReplica: 1, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + expectedStableReplicaCount: 1, + expectedCanaryReplicaCount: 0, + }, + { + name: "Scale stableRS down to zero on >=50 weight without surge", + rolloutSpecReplicas: 1, + setWeight: 51, + maxSurge: intstr.FromInt(0), + maxUnavailable: intstr.FromInt(1), + + stableSpecReplica: 1, + stableAvailableReplica: 1, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + expectedStableReplicaCount: 0, + expectedCanaryReplicaCount: 0, + }, + { + name: "Scale canaryRS to one on >=50 weight without surge and stable replicas", + rolloutSpecReplicas: 1, + setWeight: 51, + maxSurge: intstr.FromInt(0), + maxUnavailable: intstr.FromInt(1), + + stableSpecReplica: 0, + stableAvailableReplica: 0, + + canarySpecReplica: 0, + canaryAvailableReplica: 0, + + expectedStableReplicaCount: 0, + expectedCanaryReplicaCount: 1, + }, + { + name: "Scale up Stable before canaryRS", rolloutSpecReplicas: 10, setWeight: 30, maxSurge: intstr.FromInt(1), @@ -326,7 +374,7 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { olderRS: newRS("older", 3, 3), }, { - name: "Scale down newRS and stable", + name: "Scale down canaryRS and stable", rolloutSpecReplicas: 10, setWeight: 30, maxSurge: intstr.FromInt(0), @@ -358,7 +406,7 @@ func TestCalculateReplicaCountsForCanary(t *testing.T) { expectedCanaryReplicaCount: 9, }, { - name: "Do not scale down newRS or stable when older RS count >= scaleDownCount", + name: "Do not scale down canaryRS or stable when older RS count >= scaleDownCount", rolloutSpecReplicas: 10, setWeight: 30, maxSurge: intstr.FromInt(0), @@ -721,82 +769,83 @@ func TestApproximateWeightedNewStableReplicaCounts(t *testing.T) { tests := []struct { replicas int32 weight int32 + maxWeight int32 maxSurge int32 expCanary int32 expStable int32 }{ - {replicas: 0, weight: 0, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% - {replicas: 0, weight: 50, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% - {replicas: 
0, weight: 100, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% - - {replicas: 0, weight: 0, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% - {replicas: 0, weight: 50, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% - {replicas: 0, weight: 100, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% - - {replicas: 1, weight: 0, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% - {replicas: 1, weight: 1, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% - {replicas: 1, weight: 49, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% - {replicas: 1, weight: 50, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% - {replicas: 1, weight: 99, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% - {replicas: 1, weight: 100, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% - - {replicas: 1, weight: 0, maxSurge: 1, expCanary: 0, expStable: 1}, // 0% - {replicas: 1, weight: 1, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% - {replicas: 1, weight: 49, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% - {replicas: 1, weight: 50, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% - {replicas: 1, weight: 99, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% - {replicas: 1, weight: 100, maxSurge: 1, expCanary: 1, expStable: 0}, // 100% - - {replicas: 2, weight: 0, maxSurge: 0, expCanary: 0, expStable: 2}, // 0% - {replicas: 2, weight: 1, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% - {replicas: 2, weight: 50, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% - {replicas: 2, weight: 99, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% - {replicas: 2, weight: 100, maxSurge: 0, expCanary: 2, expStable: 0}, // 100% - - {replicas: 2, weight: 0, maxSurge: 1, expCanary: 0, expStable: 2}, // 0% - {replicas: 2, weight: 1, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 2, weight: 50, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% - {replicas: 2, weight: 99, maxSurge: 1, expCanary: 2, expStable: 1}, // 66.6% - {replicas: 2, weight: 100, maxSurge: 1, expCanary: 2, expStable: 0}, // 100% - - 
{replicas: 3, weight: 10, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 25, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 33, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 34, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 49, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 50, maxSurge: 0, expCanary: 2, expStable: 1}, // 66.6% - - {replicas: 3, weight: 10, maxSurge: 1, expCanary: 1, expStable: 3}, // 25% - {replicas: 3, weight: 25, maxSurge: 1, expCanary: 1, expStable: 3}, // 25% - {replicas: 3, weight: 33, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 34, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% - {replicas: 3, weight: 49, maxSurge: 1, expCanary: 2, expStable: 2}, // 50% - {replicas: 3, weight: 50, maxSurge: 1, expCanary: 2, expStable: 2}, // 50% - - {replicas: 10, weight: 0, maxSurge: 1, expCanary: 0, expStable: 10}, // 0% - {replicas: 10, weight: 1, maxSurge: 0, expCanary: 1, expStable: 9}, // 10% - {replicas: 10, weight: 14, maxSurge: 0, expCanary: 1, expStable: 9}, // 10% - {replicas: 10, weight: 15, maxSurge: 0, expCanary: 2, expStable: 8}, // 20% - {replicas: 10, weight: 16, maxSurge: 0, expCanary: 2, expStable: 8}, // 20% - {replicas: 10, weight: 99, maxSurge: 0, expCanary: 9, expStable: 1}, // 90% - {replicas: 10, weight: 100, maxSurge: 1, expCanary: 10, expStable: 0}, // 100% - - {replicas: 10, weight: 0, maxSurge: 1, expCanary: 0, expStable: 10}, // 0% - {replicas: 10, weight: 1, maxSurge: 1, expCanary: 1, expStable: 10}, // 9.1% - {replicas: 10, weight: 18, maxSurge: 1, expCanary: 2, expStable: 9}, // 18.1% - {replicas: 10, weight: 19, maxSurge: 1, expCanary: 2, expStable: 9}, // 18.1% - {replicas: 10, weight: 20, maxSurge: 1, expCanary: 2, expStable: 8}, // 20% - {replicas: 10, weight: 23, maxSurge: 1, expCanary: 2, expStable: 8}, // 20% - {replicas: 10, weight: 24, 
maxSurge: 1, expCanary: 3, expStable: 8}, // 27.2% - {replicas: 10, weight: 25, maxSurge: 1, expCanary: 3, expStable: 8}, // 27.2% - {replicas: 10, weight: 99, maxSurge: 1, expCanary: 10, expStable: 1}, // 90.9% - {replicas: 10, weight: 100, maxSurge: 1, expCanary: 10, expStable: 0}, // 100% + {replicas: 0, weight: 0, maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 50, maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 100, maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 0}, // 0% + + {replicas: 0, weight: 0, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 50, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% + {replicas: 0, weight: 100, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 0}, // 0% + + {replicas: 1, weight: 0, maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 1, maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 49, maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 50, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% + {replicas: 1, weight: 99, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% + {replicas: 1, weight: 100, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 0}, // 100% + + {replicas: 1, weight: 0, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 1}, // 0% + {replicas: 1, weight: 1, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 49, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 50, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 99, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 1, weight: 100, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 0}, // 100% + + {replicas: 2, weight: 0, 
maxWeight: 100, maxSurge: 0, expCanary: 0, expStable: 2}, // 0% + {replicas: 2, weight: 1, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 50, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 99, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 100, maxWeight: 100, maxSurge: 0, expCanary: 2, expStable: 0}, // 100% + + {replicas: 2, weight: 0, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 2}, // 0% + {replicas: 2, weight: 1, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 2, weight: 50, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 1}, // 50% + {replicas: 2, weight: 99, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 1}, // 66.6% + {replicas: 2, weight: 100, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 0}, // 100% + + {replicas: 3, weight: 10, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 25, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 33, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 34, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 49, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 50, maxWeight: 100, maxSurge: 0, expCanary: 2, expStable: 1}, // 66.6% + + {replicas: 3, weight: 10, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 3}, // 25% + {replicas: 3, weight: 25, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 3}, // 25% + {replicas: 3, weight: 33, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 34, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 2}, // 33.3% + {replicas: 3, weight: 49, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 2}, // 50% + {replicas: 3, weight: 50, maxWeight: 100, 
maxSurge: 1, expCanary: 2, expStable: 2}, // 50% + + {replicas: 10, weight: 0, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 10}, // 0% + {replicas: 10, weight: 1, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 9}, // 10% + {replicas: 10, weight: 14, maxWeight: 100, maxSurge: 0, expCanary: 1, expStable: 9}, // 10% + {replicas: 10, weight: 15, maxWeight: 100, maxSurge: 0, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 16, maxWeight: 100, maxSurge: 0, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 99, maxWeight: 100, maxSurge: 0, expCanary: 9, expStable: 1}, // 90% + {replicas: 10, weight: 100, maxWeight: 100, maxSurge: 1, expCanary: 10, expStable: 0}, // 100% + + {replicas: 10, weight: 0, maxWeight: 100, maxSurge: 1, expCanary: 0, expStable: 10}, // 0% + {replicas: 10, weight: 1, maxWeight: 100, maxSurge: 1, expCanary: 1, expStable: 10}, // 9.1% + {replicas: 10, weight: 18, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 9}, // 18.1% + {replicas: 10, weight: 19, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 9}, // 18.1% + {replicas: 10, weight: 20, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 23, maxWeight: 100, maxSurge: 1, expCanary: 2, expStable: 8}, // 20% + {replicas: 10, weight: 24, maxWeight: 100, maxSurge: 1, expCanary: 3, expStable: 8}, // 27.2% + {replicas: 10, weight: 25, maxWeight: 100, maxSurge: 1, expCanary: 3, expStable: 8}, // 27.2% + {replicas: 10, weight: 99, maxWeight: 100, maxSurge: 1, expCanary: 10, expStable: 1}, // 90.9% + {replicas: 10, weight: 100, maxWeight: 100, maxSurge: 1, expCanary: 10, expStable: 0}, // 100% } for i := range tests { test := tests[i] - t.Run(fmt.Sprintf("%s_replicas:%d_weight:%d_surge:%d", t.Name(), test.replicas, test.weight, test.maxSurge), func(t *testing.T) { - newRSReplicaCount, stableRSReplicaCount := approximateWeightedCanaryStableReplicaCounts(test.replicas, test.weight, test.maxSurge) + 
t.Run(fmt.Sprintf("%s_replicas:%d_weight:%d_maxweight:%d_surge:%d", t.Name(), test.replicas, test.weight, test.maxWeight, test.maxSurge), func(t *testing.T) { + newRSReplicaCount, stableRSReplicaCount := approximateWeightedCanaryStableReplicaCounts(test.replicas, test.weight, test.maxWeight, test.maxSurge) assert.Equal(t, test.expCanary, newRSReplicaCount, "check canary replica count") assert.Equal(t, test.expStable, stableRSReplicaCount, "check stable replica count") }) @@ -905,12 +954,13 @@ func TestCalculateReplicaCountsForCanaryStableRSdEdgeCases(t *testing.T) { } func TestTrafficWeightToReplicas(t *testing.T) { - assert.Equal(t, int32(0), trafficWeightToReplicas(10, 0)) - assert.Equal(t, int32(2), trafficWeightToReplicas(10, 20)) - assert.Equal(t, int32(3), trafficWeightToReplicas(10, 25)) - assert.Equal(t, int32(4), trafficWeightToReplicas(10, 33)) - assert.Equal(t, int32(10), trafficWeightToReplicas(10, 99)) - assert.Equal(t, int32(10), trafficWeightToReplicas(10, 100)) + assert.Equal(t, int32(0), trafficWeightToReplicas(10, 0, 100)) + assert.Equal(t, int32(2), trafficWeightToReplicas(10, 20, 100)) + assert.Equal(t, int32(3), trafficWeightToReplicas(10, 25, 100)) + assert.Equal(t, int32(4), trafficWeightToReplicas(10, 33, 100)) + assert.Equal(t, int32(10), trafficWeightToReplicas(10, 99, 100)) + assert.Equal(t, int32(10), trafficWeightToReplicas(10, 100, 100)) + assert.Equal(t, int32(23), trafficWeightToReplicas(23, 100000000, 100000000)) } func TestGetOtherRSs(t *testing.T) { diff --git a/utils/rollout/rolloututil.go b/utils/rollout/rolloututil.go index 0b7df7ff38..5411f3f58c 100644 --- a/utils/rollout/rolloututil.go +++ b/utils/rollout/rolloututil.go @@ -4,6 +4,8 @@ import ( "fmt" "strconv" + "github.com/argoproj/argo-rollouts/utils/weightutil" + replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset" "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" @@ -184,13 +186,13 @@ func CanaryStepString(c v1alpha1.CanaryStep) string { // 
ShouldVerifyWeight We use this to test if we should verify weights because weight verification could involve // API calls to the cloud provider which could incur rate limiting -func ShouldVerifyWeight(ro *v1alpha1.Rollout) bool { +func ShouldVerifyWeight(ro *v1alpha1.Rollout, desiredWeight int32) bool { currentStep, _ := replicasetutil.GetCurrentCanaryStep(ro) // If we are in the middle of an update at a setWeight step, also perform weight verification. // Note that we don't do this every reconciliation because weight verification typically involves // API calls to the cloud provider which could incur rate limitingq - shouldVerifyWeight := ro.Status.StableRS != "" && - !IsFullyPromoted(ro) && - currentStep != nil && currentStep.SetWeight != nil + shouldVerifyWeight := (ro.Status.StableRS != "" && !IsFullyPromoted(ro) && currentStep != nil && currentStep.SetWeight != nil) || + (ro.Status.StableRS != "" && !IsFullyPromoted(ro) && currentStep == nil && desiredWeight == weightutil.MaxTrafficWeight(ro)) // We are at end of rollout + return shouldVerifyWeight } diff --git a/utils/rollout/rolloututil_test.go b/utils/rollout/rolloututil_test.go index 37c1810f00..d88f080f64 100644 --- a/utils/rollout/rolloututil_test.go +++ b/utils/rollout/rolloututil_test.go @@ -422,15 +422,21 @@ func TestShouldVerifyWeight(t *testing.T) { ro.Spec.Strategy.Canary.Steps = []v1alpha1.CanaryStep{{ SetWeight: pointer.Int32Ptr(20), }} - assert.Equal(t, true, ShouldVerifyWeight(ro)) + assert.Equal(t, true, ShouldVerifyWeight(ro, 20)) ro.Status.StableRS = "" - assert.Equal(t, false, ShouldVerifyWeight(ro)) + assert.Equal(t, false, ShouldVerifyWeight(ro, 20)) ro.Status.StableRS = "34feab23f" ro.Status.CurrentStepIndex = nil ro.Spec.Strategy.Canary.Steps = nil - assert.Equal(t, false, ShouldVerifyWeight(ro)) + assert.Equal(t, false, ShouldVerifyWeight(ro, 20)) + + // Test when the weight is 100, because we are at end of rollout + ro.Status.StableRS = "34feab23f" + ro.Status.CurrentStepIndex = nil + 
ro.Spec.Strategy.Canary.Steps = nil + assert.Equal(t, true, ShouldVerifyWeight(ro, 100)) } func Test_isGenerationObserved(t *testing.T) { diff --git a/utils/time/now.go b/utils/time/now.go index 1b51cb3cc0..87952a91b4 100644 --- a/utils/time/now.go +++ b/utils/time/now.go @@ -1,13 +1,36 @@ package time import ( + "sync" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var ( + timeNowFunc = time.Now + + // Acquire this mutex when accessing now function + nowLock sync.RWMutex +) + // Now is a wrapper around time.Now() and used to override behavior in tests. -var Now = time.Now +// Now invokes time.Now(), or its replacement function +func Now() time.Time { + nowLock.RLock() + defer nowLock.RUnlock() + + return timeNowFunc() +} + +// Replace the function used to return the current time (defaults to time.Now() ) +func SetNowTimeFunc(f func() time.Time) { + nowLock.Lock() + defer nowLock.Unlock() + + timeNowFunc = f + +} // MetaNow is a wrapper around metav1.Now() and used to override behavior in tests. var MetaNow = func() metav1.Time { diff --git a/utils/tolerantinformer/tolerantinformer_test.go b/utils/tolerantinformer/tolerantinformer_test.go index 351601e3a9..84c25c1215 100644 --- a/utils/tolerantinformer/tolerantinformer_test.go +++ b/utils/tolerantinformer/tolerantinformer_test.go @@ -122,7 +122,7 @@ func TestMalformedRolloutEphemeralCtr(t *testing.T) { verify(list[0]) } -func verifyAnalysisSpec(t *testing.T, s interface{}) { +func verifyAnalysisSpec(t *testing.T, s any) { // metrics: // - name: test // provider: diff --git a/utils/tolerantinformer/tollerantinformer.go b/utils/tolerantinformer/tollerantinformer.go index aba0bd7f29..e3996f9cce 100644 --- a/utils/tolerantinformer/tollerantinformer.go +++ b/utils/tolerantinformer/tollerantinformer.go @@ -14,7 +14,7 @@ import ( // convertObject converts a runtime.Object into the supplied concrete typed object // typedObj should be a pointer to a typed object which is desired to be filled in. 
// This is a best effort conversion which ignores unmarshalling errors. -func convertObject(object runtime.Object, typedObj interface{}) error { +func convertObject(object runtime.Object, typedObj any) error { un, ok := object.(*unstructured.Unstructured) if !ok { return fmt.Errorf("malformed object: expected \"*unstructured.Unstructured\", got \"%s\"", reflect.TypeOf(object).Name()) @@ -35,7 +35,7 @@ func convertObject(object runtime.Object, typedObj interface{}) error { return nil } -func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { +func fromUnstructuredViaJSON(u map[string]any, obj any) error { data, err := json.Marshal(u) if err != nil { return err diff --git a/utils/unstructured/unstructured.go b/utils/unstructured/unstructured.go index 7c73812da3..a0dd341a48 100644 --- a/utils/unstructured/unstructured.go +++ b/utils/unstructured/unstructured.go @@ -13,7 +13,7 @@ import ( ) func StrToUnstructuredUnsafe(jsonStr string) *unstructured.Unstructured { - obj := make(map[string]interface{}) + obj := make(map[string]any) err := yaml.Unmarshal([]byte(jsonStr), &obj) if err != nil { panic(err) @@ -22,7 +22,7 @@ func StrToUnstructuredUnsafe(jsonStr string) *unstructured.Unstructured { } func StrToUnstructured(jsonStr string) (*unstructured.Unstructured, error) { - obj := make(map[string]interface{}) + obj := make(map[string]any) err := yaml.Unmarshal([]byte(jsonStr), &obj) if err != nil { return nil, err @@ -30,7 +30,7 @@ func StrToUnstructured(jsonStr string) (*unstructured.Unstructured, error) { return &unstructured.Unstructured{Object: obj}, nil } -func ObjectToRollout(obj interface{}) *v1alpha1.Rollout { +func ObjectToRollout(obj any) *v1alpha1.Rollout { un, ok := obj.(*unstructured.Unstructured) if ok { var ro v1alpha1.Rollout @@ -49,7 +49,7 @@ func ObjectToRollout(obj interface{}) *v1alpha1.Rollout { return ro } -func ObjectToAnalysisRun(obj interface{}) *v1alpha1.AnalysisRun { +func ObjectToAnalysisRun(obj any) *v1alpha1.AnalysisRun 
{ un, ok := obj.(*unstructured.Unstructured) if ok { var ar v1alpha1.AnalysisRun @@ -67,7 +67,7 @@ func ObjectToAnalysisRun(obj interface{}) *v1alpha1.AnalysisRun { return ar } -func ObjectToExperiment(obj interface{}) *v1alpha1.Experiment { +func ObjectToExperiment(obj any) *v1alpha1.Experiment { un, ok := obj.(*unstructured.Unstructured) if ok { var ex v1alpha1.Experiment @@ -93,7 +93,7 @@ func SplitYAML(out string) ([]*unstructured.Unstructured, error) { parts := diffSeparator.Split(out, -1) var objs []*unstructured.Unstructured for _, part := range parts { - var objMap map[string]interface{} + var objMap map[string]any err := yaml.Unmarshal([]byte(part), &objMap) if err != nil { return objs, err diff --git a/utils/weightutil/weight.go b/utils/weightutil/weight.go new file mode 100644 index 0000000000..357a5fdcd3 --- /dev/null +++ b/utils/weightutil/weight.go @@ -0,0 +1,13 @@ +package weightutil + +import ( + "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" +) + +func MaxTrafficWeight(ro *v1alpha1.Rollout) int32 { + maxWeight := int32(100) + if ro.Spec.Strategy.Canary != nil && ro.Spec.Strategy.Canary.TrafficRouting != nil && ro.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight != nil { + maxWeight = *ro.Spec.Strategy.Canary.TrafficRouting.MaxTrafficWeight + } + return maxWeight +}