diff --git a/flytectl/.gitattributes b/flytectl/.gitattributes new file mode 100644 index 0000000000..53c12facc6 --- /dev/null +++ b/flytectl/.gitattributes @@ -0,0 +1 @@ +docs/**/**/*rst linguist-generated=true diff --git a/flytectl/.github/PULL_REQUEST_TEMPLATE.md b/flytectl/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..97321913f9 --- /dev/null +++ b/flytectl/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,32 @@ +## Read then delete + +- Make sure to use a concise title for the pull-request. +- Use #patch, #minor, #major or #none in the pull-request title to bump the corresponding version. Otherwise, the patch version + will be bumped. [More details](https://github.com/marketplace/actions/github-tag-bump) + +# TL;DR +_Please replace this text with a description of what this PR accomplishes._ + +## Type
- [ ] Bug Fix +- [ ] Feature +- [ ] Plugin + +## Are all requirements met? + +- [ ] Code completed +- [ ] Smoke tested +- [ ] Unit tests added +- [ ] Code documentation added +- [ ] Any pending items have an associated Issue + +## Complete description +_How did you fix the bug, make the feature etc. 
Link to any design docs etc_ + +## Tracking Issue +https://github.com/flyteorg/flyte/issues/ + +## Follow-up issue +_NA_ +OR +_https://github.com/flyteorg/flyte/issues/_ diff --git a/flytectl/.github/workflows/checks.yml b/flytectl/.github/workflows/checks.yml new file mode 100644 index 0000000000..211a7aab5b --- /dev/null +++ b/flytectl/.github/workflows/checks.yml @@ -0,0 +1,165 @@ +name: Flytectl Checks + +on: + workflow_dispatch: + pull_request: + branches: + - master + paths-ignore: + - "docs/**" + - "boilerplate/**" + push: + branches: + - master + paths-ignore: + - "docs/**" + - "boilerplate/**" + +jobs: + lint: + name: Lint + uses: flyteorg/flytetools/.github/workflows/lint.yml@master + with: + go-version: 1.19 + + tests: + name: Unit Tests + uses: flyteorg/flytetools/.github/workflows/tests.yml@master + secrets: + FLYTE_BOT_PAT: ${{ secrets.FLYTE_BOT_PAT }} + with: + go-version: 1.19 + + generate: + name: Check Go Generate + uses: flyteorg/flytetools/.github/workflows/go_generate.yml@master + with: + go-version: 1.19 + + dry_run_goreleaser: + name: Dry Run Goreleaser + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: "2" + - uses: actions/cache@v2 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('go.sum') }} + - uses: actions/setup-go@v3 + with: + go-version: '1.19' + - name: Run GoReleaser dry run + uses: goreleaser/goreleaser-action@v2 + with: + version: latest + args: --snapshot --skip-publish --rm-dist + + sandbox: + name: Test Getting started + runs-on: ubuntu-latest + steps: + - uses: insightsengineering/disk-space-reclaimer@v1 + - name: Checkout + uses: actions/checkout@v2 + - uses: actions/cache@v2 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('go.sum') }} + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.19 + - name: Build Flytectl binary + run: make compile + - name: Create a 
sandbox cluster + run: | + bin/flytectl demo start + # Sleep is necessary here since `flyte-proxy` might not be ready + # to serve requests when the above command exits successfully. + # Fixed in: https://github.com/flyteorg/flyte/pull/4348 + # TODO (jeev): Remove this when ^ is released. + sleep 10 + - name: Setup flytectl config + run: bin/flytectl config init + - name: Register cookbook + run: bin/flytectl register examples -d development -p flytesnacks + - name: Teardown Sandbox cluster + run: bin/flytectl sandbox teardown + + bump_version: + name: Bump Version + if: ${{ github.event_name != 'pull_request' }} + needs: [ lint, tests, generate, dry_run_goreleaser, sandbox ] # Only to ensure it can successfully build + uses: flyteorg/flytetools/.github/workflows/bump_version.yml@master + secrets: + FLYTE_BOT_PAT: ${{ secrets.FLYTE_BOT_PAT }} + + generate_docs: + name: Generate documentation + needs: [ bump_version ] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + lfs: true + - uses: actions/setup-go@v1 + with: + go-version: '1.19' + - uses: actions/setup-python@v1 + with: + python-version: 3.8 + - uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: docs-pip-${{ runner.os }}-${{ hashFiles('doc-requirements.txt') }}-${{ hashFiles('doc-requirements.in') }} + restore-keys: docs-pip- + - uses: crazy-max/ghaction-import-gpg@v3 + with: + gpg-private-key: ${{ secrets.FLYTE_BOT_GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.FLYTE_BOT_GPG_PASSPHRASE }} + git-user-signingkey: true + git-commit-gpgsign: true + - name: Install Dependencies + run: | + # Install all requirements + pip install -r doc-requirements.txt + - name: Generate documentation + run: | + make -C docs gendocs + - name: Create Pull Request + id: cpr + uses: peter-evans/create-pull-request@v3 + with: + token: ${{ secrets.FLYTE_BOT_PAT }} + commit-message: Update documentation + committer: Flyte-Bot + author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> + 
signoff: true + branch: flyte-bot-update-documentation + delete-branch: true + title: 'Update documentation' + body: | + Update documentation + - Auto-generated by [flyte-bot] + labels: | + documentation + draft: false + + goreleaser: + name: Goreleaser + needs: [ bump_version ] # Only to ensure it can successfully build + uses: flyteorg/flytetools/.github/workflows/goreleaser.yml@master + with: + # https://github.com/docker/cli/issues/4437 describes an issue that affects the latest + # version of go 1.19 and 1.20, so pinning to latest known good version for now. + go-version: "1.19.10" + secrets: + FLYTE_BOT_PAT: ${{ secrets.FLYTE_BOT_PAT }} + diff --git a/flytectl/.github/workflows/monodocs_build.yml b/flytectl/.github/workflows/monodocs_build.yml new file mode 100644 index 0000000000..a2610915fb --- /dev/null +++ b/flytectl/.github/workflows/monodocs_build.yml @@ -0,0 +1,54 @@ +name: Monodocs Build + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +on: + push: + branches: + - master + pull_request: + branches: + - master +jobs: + docs: + name: Monodocs Build + runs-on: ubuntu-latest + steps: + - name: Fetch flytectl code + uses: actions/checkout@v4 + with: + path: "${{ github.workspace }}/flytectl" + - name: Fetch flyte code + uses: actions/checkout@v4 + with: + repository: flyteorg/flyte + path: "${{ github.workspace }}/flyte" + - uses: conda-incubator/setup-miniconda@v3 + with: + auto-update-conda: true + python-version: 3.9 + - shell: bash -el {0} + working-directory: ${{ github.workspace }}/flyte + run: | + conda install -c conda-forge conda-lock + conda-lock install -n monodocs-env monodocs-environment.lock.yaml + - shell: bash -el {0} + working-directory: ${{ github.workspace }}/flyte + run: | + conda activate monodocs-env + pip install ./flyteidl + conda info + conda list + conda config --show-sources + conda config --show + printenv | sort + - name: Build the documentation + 
working-directory: ${{ github.workspace }}/flyte + shell: bash -el {0} + env: + FLYTECTL_LOCAL_PATH: ${{ github.workspace }}/flytectl + run: | + conda activate monodocs-env + make -C docs clean html SPHINXOPTS="-W -vvv" diff --git a/flytectl/.github/workflows/upgrade_automation.yml b/flytectl/.github/workflows/upgrade_automation.yml new file mode 100644 index 0000000000..a087b8fd04 --- /dev/null +++ b/flytectl/.github/workflows/upgrade_automation.yml @@ -0,0 +1,21 @@ +name: Upgrade Automation +on: + workflow_dispatch: + inputs: + component: + description: "Name of Flyte component" + required: true + default: "boilerplate" + type: choice + options: + - boilerplate + - flyteidl + - flytestdlib +jobs: + trigger-upgrade: + name: ${{ github.event.inputs.component }} Upgrade + uses: flyteorg/flytetools/.github/workflows/flyte_automation.yml@master + with: + component: ${{ github.event.inputs.component }} + secrets: + FLYTE_BOT_PAT: ${{ secrets.FLYTE_BOT_PAT }} diff --git a/flytectl/.gitignore b/flytectl/.gitignore new file mode 100644 index 0000000000..3e69f20c81 --- /dev/null +++ b/flytectl/.gitignore @@ -0,0 +1,12 @@ +dist/ +.idea +vendor +bin +.DS_Store +_test +./config.yaml +docs/build/* +cmd/upgrade/flyte.ext +.vscode +# direnv +.envrc diff --git a/flytectl/.golangci.yml b/flytectl/.golangci.yml new file mode 100644 index 0000000000..5d53f35295 --- /dev/null +++ b/flytectl/.golangci.yml @@ -0,0 +1,30 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
+# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +run: + skip-dirs: + - pkg/client + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gas + - goconst + - goimports + - golint + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - staticcheck + - structcheck + - typecheck + - unconvert + - unparam + - unused + - varcheck diff --git a/flytectl/.goreleaser.yml b/flytectl/.goreleaser.yml new file mode 100644 index 0000000000..15c88a7c5a --- /dev/null +++ b/flytectl/.goreleaser.yml @@ -0,0 +1,89 @@ +before: + hooks: + - go mod download +builds: + - env: + - CGO_ENABLED=0 + main: ./main.go + binary: flytectl + id: flytectl-darwin + goos: + - darwin + goarch: + - arm64 + - amd64 + ldflags: + - -s -w -X github.com/flyteorg/flyte/flytestdlib/version.Version={{.Version}} -X github.com/flyteorg/flyte/flytestdlib/version.Build={{.ShortCommit}} -X github.com/flyteorg/flyte/flytestdlib/version.BuildTime={{.Date}} + - env: + - CGO_ENABLED=0 + main: ./main.go + binary: flytectl + id: flytectl-linux + goos: + - linux + goarch: + - arm64 + - amd64 + ldflags: + - -s -w -X github.com/flyteorg/flyte/flytestdlib/version.Version={{.Version}} -X github.com/flyteorg/flyte/flytestdlib/version.Build={{.ShortCommit}} -X github.com/flyteorg/flyte/flytestdlib/version.BuildTime={{.Date}} + - env: + - CGO_ENABLED=0 + main: ./main.go + binary: flytectl + id: flytectl-windows + goos: + - windows + ldflags: + - -s -w -X github.com/flyteorg/flyte/flytestdlib/version.Version={{.Version}} -X github.com/flyteorg/flyte/flytestdlib/version.Build={{.ShortCommit}} -X github.com/flyteorg/flyte/flytestdlib/version.BuildTime={{.Date}} +archives: + - name_template: |- + {{ .ProjectName }}_ + {{- if eq .Os "darwin" }}Darwin + {{- else if eq .Os "linux" }}Linux + {{- else if eq .Os "windows" }}Windows + {{- else }}{{ .Os }}{{ end }}_ + {{- if eq .Arch 
"amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + format_overrides: + - goos: windows + format: zip +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' +brews: + - # Name template of the recipe + # Default to project name + name: flytectl + + folder: Formula + + # Github repository to push the tap to. + tap: + owner: flyteorg + name: homebrew-tap + + # Git author used to commit to the repository. + # Defaults are shown. + commit_author: + name: Flyte-Bot + email: admin@flyte.org + + # Your app's homepage. + # Default is empty. + homepage: "https://docs.flyte.org/en/latest/flytectl/docs_index.html" + + # Your app's description. + # Default is empty. + description: "FlyteCtl is a command line tool to interact with a Flyte cluster." + + # Default is false. + skip_upload: auto diff --git a/flytectl/.readthedocs.yml b/flytectl/.readthedocs.yml new file mode 100644 index 0000000000..c24f894fd1 --- /dev/null +++ b/flytectl/.readthedocs.yml @@ -0,0 +1,19 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +python: + install: + - requirements: doc-requirements.txt + +build: + os: "ubuntu-22.04" + tools: + python: "3.8" diff --git a/flytectl/CODEOWNERS b/flytectl/CODEOWNERS new file mode 100644 index 0000000000..78dadcf414 --- /dev/null +++ b/flytectl/CODEOWNERS @@ -0,0 +1,3 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence. 
+* @kumare3 @enghabu @wild-endeavor diff --git a/flytectl/CODE_OF_CONDUCT.md b/flytectl/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e12139d691 --- /dev/null +++ b/flytectl/CODE_OF_CONDUCT.md @@ -0,0 +1,2 @@ +This project is governed by LF AI Foundation's [code of conduct](https://lfprojects.org/policies/code-of-conduct/). +All contributors and participants agree to abide by its terms. diff --git a/flytectl/LICENSE b/flytectl/LICENSE new file mode 100644 index 0000000000..bed437514f --- /dev/null +++ b/flytectl/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Lyft, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/flytectl/Makefile b/flytectl/Makefile new file mode 100644 index 0000000000..e190089133 --- /dev/null +++ b/flytectl/Makefile @@ -0,0 +1,40 @@ +export REPOSITORY=flytectl +include boilerplate/flyte/golang_test_targets/Makefile +include boilerplate/flyte/precommit/Makefile + +GIT_VERSION := $(shell git describe --always --tags) +GIT_HASH := $(shell git rev-parse --short HEAD) +TIMESTAMP := $(shell date '+%Y-%m-%d') +PACKAGE ?=github.com/flyteorg/flyte/flytestdlib + +LD_FLAGS="-s -w -X $(PACKAGE)/version.Version=$(GIT_VERSION) -X $(PACKAGE)/version.Build=$(GIT_HASH) -X $(PACKAGE)/version.BuildTime=$(TIMESTAMP)" + +define PIP_COMPILE +pip-compile $(1) --upgrade --verbose +endef + +compile: + go build -o bin/flytectl -ldflags=$(LD_FLAGS) main.go + +compile_debug: + go build -gcflags='all=-N -l' -o bin/flytectl main.go + +.PHONY: update_boilerplate +update_boilerplate: + @curl https://raw.githubusercontent.com/flyteorg/boilerplate/master/boilerplate/update.sh -o boilerplate/update.sh + @boilerplate/update.sh + +.PHONY: install-piptools +install-piptools: + pip install -U pip-tools + +.PHONY: doc-requirements.txt +doc-requirements.txt: doc-requirements.in install-piptools + $(call PIP_COMPILE,doc-requirements.in) + +.PHONY: test_unit_without_flag +test_unit_without_flag: + go test ./... 
-race -coverprofile=coverage.temp.txt -covermode=atomic + cat coverage.temp.txt | grep -v "_flags.go" > coverage.txt + rm coverage.temp.txt + curl -s https://codecov.io/bash > codecov_bash.sh && bash codecov_bash.sh diff --git a/flytectl/NOTICE b/flytectl/NOTICE new file mode 100644 index 0000000000..938e7ae038 --- /dev/null +++ b/flytectl/NOTICE @@ -0,0 +1,4 @@ +flytectl +Copyright 2019-2020 Lyft Inc. + +This product includes software developed at Lyft Inc. diff --git a/flytectl/README.md b/flytectl/README.md new file mode 100644 index 0000000000..5a6e5fc26e --- /dev/null +++ b/flytectl/README.md @@ -0,0 +1,71 @@ + +

+ Flyte Logo +

+

+ FlyteCTL +

+

+ Flyte's official command-line interface +

+

+ Documentation + ยท + Contribution Guide +

+ + +[![Docs](https://readthedocs.org/projects/flytectl/badge/?version=latest&style=plastic)](https://flytectl.rtfd.io) +[![Current Release](https://img.shields.io/github/release/flyteorg/flytectl.svg)](https://github.com/flyteorg/flytectl/releases/latest) +![Master](https://github.com/flyteorg/flytectl/workflows/Master/badge.svg) +[![GoDoc](https://godoc.org/github.com/flyteorg/flytectl?status.svg)](https://pkg.go.dev/mod/github.com/flyteorg/flytectl) +[![License](https://img.shields.io/badge/LICENSE-Apache2.0-ff69b4.svg)](http://www.apache.org/licenses/LICENSE-2.0.html) +[![CodeCoverage](https://img.shields.io/codecov/c/github/flyteorg/flytectl.svg)](https://codecov.io/gh/flyteorg/flytectl) +[![Go Report Card](https://goreportcard.com/badge/github.com/flyteorg/flytectl)](https://goreportcard.com/report/github.com/flyteorg/flytectl) +![Commit activity](https://img.shields.io/github/commit-activity/w/lyft/flytectl.svg?style=plastic) +![Commit since last release](https://img.shields.io/github/commits-since/lyft/flytectl/latest.svg?style=plastic) +[![Slack](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://slack.flyte.org) + +Flytectl was designed as a portable and lightweight command-line interface to work with Flyte. It is written in Golang and accesses [FlyteAdmin](https://github.com/flyteorg/flyteadmin/), the control plane for Flyte. + +## ๐Ÿš€ Quick Start + +1. Install Flytectl with bash or shell script. + + * Bash + ```bash + $ brew install flyteorg/homebrew-tap/flytectl + ``` + * Shell script + ```bash + $ curl -sL https://ctl.flyte.org/install | bash + ``` +2. (Optional) `flytectl upgrade` provides a general interface to upgrading Flytectl; run the command in the output. + +3. Start Sandbox using Flytectl. + ```bash + $ flytectl sandbox start + ``` + +4. Register examples. + ```bash + # Register core workflows + $ flytectl register examples -d development -p flytesnacks + ``` + + +

+ ๐Ÿ“– How to Contribute to Flytectl +

+ + +You can find the detailed contribution guide [here](docs/source/contribute.rst). + + +

+ ๐Ÿž File an Issue +

+ + +Refer to the [issues](https://docs.flyte.org/en/latest/community/contribute.html#file-an-issue) section in the contribution +guide if you'd like to file an issue. diff --git a/flytectl/boilerplate/flyte/code_of_conduct/CODE_OF_CONDUCT.md b/flytectl/boilerplate/flyte/code_of_conduct/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e12139d691 --- /dev/null +++ b/flytectl/boilerplate/flyte/code_of_conduct/CODE_OF_CONDUCT.md @@ -0,0 +1,2 @@ +This project is governed by LF AI Foundation's [code of conduct](https://lfprojects.org/policies/code-of-conduct/). +All contributors and participants agree to abide by its terms. diff --git a/flytectl/boilerplate/flyte/code_of_conduct/README.rst b/flytectl/boilerplate/flyte/code_of_conduct/README.rst new file mode 100644 index 0000000000..0c9f2f1ec5 --- /dev/null +++ b/flytectl/boilerplate/flyte/code_of_conduct/README.rst @@ -0,0 +1,2 @@ +CODE OF CONDUCT +~~~~~~~~~~~~~~~ diff --git a/flytectl/boilerplate/flyte/code_of_conduct/update.sh b/flytectl/boilerplate/flyte/code_of_conduct/update.sh new file mode 100755 index 0000000000..42f6158460 --- /dev/null +++ b/flytectl/boilerplate/flyte/code_of_conduct/update.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
+# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +cp ${DIR}/CODE_OF_CONDUCT.md ${DIR}/../../../CODE_OF_CONDUCT.md diff --git a/flytectl/boilerplate/flyte/golang_support_tools/go.mod b/flytectl/boilerplate/flyte/golang_support_tools/go.mod new file mode 100644 index 0000000000..2cfeb8aa3a --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_support_tools/go.mod @@ -0,0 +1,247 @@ +module github.com/flyteorg/boilerplate + +go 1.19 + +require ( + github.com/EngHabu/mockery v0.0.0-20220405200825-3f76291311cf + github.com/alvaroloes/enumer v1.1.2 + github.com/flyteorg/flytestdlib v0.4.16 + github.com/golangci/golangci-lint v1.53.3 + github.com/pseudomuto/protoc-gen-doc v1.4.1 +) + +require ( + 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect + 4d63.com/gochecknoglobals v0.2.1 // indirect + cloud.google.com/go v0.110.2 // indirect + cloud.google.com/go/compute v1.19.3 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.2 // indirect + cloud.google.com/go/storage v1.29.0 // indirect + github.com/4meepo/tagalign v1.2.2 // indirect + github.com/Abirdcfly/dupword v0.0.11 // indirect + github.com/Antonboom/errname v0.1.10 // indirect + github.com/Antonboom/nilnil v0.1.5 // indirect + github.com/Azure/azure-sdk-for-go v62.3.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.17 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.10 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + 
github.com/Azure/go-autorest/logger v0.2.0 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/sprig v2.15.0+incompatible // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect + github.com/alexkohler/nakedret/v2 v2.0.2 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/aokoli/goutils v1.0.1 // indirect + github.com/ashanbrown/forbidigo v1.5.3 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/aws/aws-sdk-go v1.37.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v3 v3.4.0 // indirect + github.com/breml/bidichk v0.2.4 // indirect + github.com/breml/errchkjson v0.3.1 // indirect + github.com/butuzov/ireturn v0.2.0 // indirect + github.com/butuzov/mirror v1.1.0 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/charithe/durationcheck v0.0.10 // indirect + github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect + github.com/coocood/freecache v1.1.1 // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/daixiang0/gci v0.10.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.0 // indirect + github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.15.0 // indirect + 
github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect + github.com/flyteorg/stow v0.3.1 // indirect + github.com/form3tech-oss/jwt-go v3.2.2+incompatible // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-critic/go-critic v0.8.1 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.1.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect + github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.4.0 // indirect + github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/google/uuid v1.3.0 // indirect + 
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/huandu/xstrings v1.0.0 // indirect + github.com/imdario/mergo v0.3.5 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/kisielk/errcheck v1.6.3 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.4 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.7 // indirect + github.com/kyoh86/exportloopref v0.1.11 // indirect + github.com/ldez/gomoddirectives v0.2.3 // indirect + github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.1 // indirect + github.com/lufeee/execinquery v1.2.1 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/maratori/testpackage v1.1.1 // indirect + github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + 
github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/revive v1.3.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moricho/tparallel v0.3.1 // indirect + github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/ncw/swift v1.0.53 // indirect + github.com/nishanths/exhaustive v0.11.0 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.12.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v1.4.2 // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/pseudomuto/protokit v0.2.0 // indirect + github.com/quasilyte/go-ruleguard v0.3.19 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/ryancurrah/gomodguard v1.3.0 // indirect + github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars 
v1.23.0 // indirect + github.com/securego/gosec/v2 v2.16.0 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect + github.com/sivchari/nosnakecase v1.7.0 // indirect + github.com/sivchari/tenv v1.7.1 // indirect + github.com/sonatard/noctx v0.0.2 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.12.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/testify v1.8.4 // indirect + github.com/subosito/gotenv v1.4.1 // indirect + github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect + github.com/tdakkota/asciicheck v0.2.0 // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect + github.com/timonwong/loggercheck v0.9.4 // indirect + github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.5 // indirect + github.com/uudashr/gocognit v1.0.6 // indirect + github.com/xen0n/gosmopolitan v1.2.1 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.2.0 // indirect + github.com/ykadowak/zerologlint v0.1.2 // indirect + gitlab.com/bosi/decorder v0.2.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.tmz.dev/musttag v0.7.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.24.0 // 
indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect + golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect + golang.org/x/tools v0.11.1 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.126.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/grpc v1.55.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.4.3 // indirect + k8s.io/apimachinery v0.20.2 // indirect + k8s.io/client-go v0.0.0-20210217172142-7279fc64d847 // indirect + k8s.io/klog/v2 v2.5.0 // indirect + mvdan.cc/gofumpt v0.5.0 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect +) + +replace github.com/pseudomuto/protoc-gen-doc => github.com/flyteorg/protoc-gen-doc v1.4.2 diff --git a/flytectl/boilerplate/flyte/golang_support_tools/go.sum b/flytectl/boilerplate/flyte/golang_support_tools/go.sum new file mode 100644 index 0000000000..4cc434803e --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_support_tools/go.sum @@ -0,0 +1,1225 @@ +4d63.com/gocheckcompilerdirectives v1.2.1 
h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= 
+cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= 
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/4meepo/tagalign v1.2.2 h1:kQeUTkFTaBRtd/7jm8OKJl9iHk0gAO+TDFPHGSna0aw= +github.com/4meepo/tagalign v1.2.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU= +github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA= +github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls= +github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA= +github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0= +github.com/Antonboom/nilnil v0.1.5/go.mod h1:I24toVuBKhfP5teihGWctrRiPbRKHwZIFOvc6v3HZXk= +github.com/Azure/azure-sdk-for-go v62.3.0+incompatible h1:Ctfsn9UoA/BB4HMYQlbPPgNXdX0tZ4tmb85+KFb2+RE= +github.com/Azure/azure-sdk-for-go v62.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 
h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM= +github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI= +github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml 
v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/EngHabu/mockery v0.0.0-20220405200825-3f76291311cf h1:M7A2Tn3R8rVgsoJHHKkmkpiNOItys4GxJj6JytRjdDg= +github.com/EngHabu/mockery v0.0.0-20220405200825-3f76291311cf/go.mod h1:Kya4Y46gyq/3TEyAzeNe5UkCk+W9apy5KbuX+5KnZ6M= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= +github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= +github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alvaroloes/enumer v1.1.2 h1:5khqHB33TZy1GWCO/lZwcroBFh7u+0j40T83VUbfAMY= +github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/ashanbrown/forbidigo v1.5.3 h1:jfg+fkm/snMx+V9FBwsl1d340BV/99kZGv5jN9hBoXk= +github.com/ashanbrown/forbidigo v1.5.3/go.mod 
h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/aws/aws-sdk-go v1.37.1 h1:BTHmuN+gzhxkvU9sac2tZvaY0gV9ihbHw+KxZOecYvY= +github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= +github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= +github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8= +github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s= +github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ= +github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U= +github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4= +github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= 
+github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0= +github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo= +github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= +github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0= +github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/spdystream 
v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.10.0 h1:oIfnZFdC0YhpNNEX+SuIqko4cqqVZeN9IGTrhZje83Y= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 h1:cTavhURetDkezJCvxFggiyLeP40Mrk/TtVg2+ycw1Es= +github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607/go.mod h1:Cg4fM0vhYWOZdgM7RIOSTRNIc8/VT7CXClC3Ni86lu4= +github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= 
+github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= +github.com/flyteorg/flytestdlib v0.4.16 h1:r4dCPUOqoE9xCAhOw9KDB7O6cBoCxyEtepIWYcj93H0= +github.com/flyteorg/flytestdlib v0.4.16/go.mod h1:WA5Y4hrcgD0ybGOKJVOQ4sP8q7NLRV+S5SWOlH0axgM= +github.com/flyteorg/protoc-gen-doc v1.4.2 h1:Otw0F+RHaPQ8XlpzhLLgjsCMcrAIcMO01Zh+ALe3rrE= +github.com/flyteorg/protoc-gen-doc v1.4.2/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= +github.com/flyteorg/stow v0.3.1 h1:cBMbWl03Gsy5KoA5mutUYTuYpqtT7Pb8+ANGCLnmFEs= +github.com/flyteorg/stow v0.3.1/go.mod h1:HBld7ud0i4khMHwJjkO8v+NSP7ddKa/ruhf4I8fliaA= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod 
h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-critic/go-critic v0.8.1 h1:16omCF1gN3gTzt4j4J6fKI/HnRojhEp+Eks6EuKw3vw= +github.com/go-critic/go-critic v0.8.1/go.mod h1:kpzXl09SIJX1cr9TB/g/sAG+eFEl7ZS9f9cqvZtyNl0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/strparse 
v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf 
v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= +github.com/golangci/golangci-lint v1.53.3 h1:CUcRafczT4t1F+mvdkUm6KuOpxUZTl0yWN/rSU6sSMo= +github.com/golangci/golangci-lint v1.53.3/go.mod h1:W4Gg3ONq6p3Jl+0s/h9Gr0j7yEgHJWWZO2bHl2tBUXM= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned 
v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0= +github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8= +github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= 
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= 
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name 
v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= +github.com/kisielk/errcheck v1.6.3/go.mod 
h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= +github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.7 h1:2uCk94js0+nVNQoHZNLBkAR1DQJrVzw6T0RMzJn55dQ= +github.com/kunwardeep/paralleltest v1.0.7/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= +github.com/kyoh86/exportloopref v0.1.11/go.mod 
h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= +github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= +github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U= +github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007 h1:28i1IjGcx8AofiB4N3q5Yls55VEaitzuEPkFJEVgGkA= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= +github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0= 
+github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.12.1 h1:vwOqb5Nu05OikTXqhvLdHCGcx5uthIYIl0t79UVrERQ= +github.com/nunnatsa/ginkgolinter v0.12.1/go.mod h1:AK8Ab1PypVrcGUusuKD8RDcl2KgsIwvNaaxAlyHSzso= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 
h1:/I3lTljEEDNYLho3/FUB7iD/oc2cEFgVmbHzV+O0PtU= +github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5JxqMiuNYyFNmyY9rkJ/slN8y59oEu4Ei7F8OoKWQ= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.4.2 h1:CU+O4181IxFDdPH6t/HT7IiDj1I7zxNi1RIUxYwn8d0= +github.com/polyfloyd/go-errorlint v1.4.2/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 
h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-ruleguard v0.3.19 
h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc= +github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= +github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= +github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI= +github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ= +github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.23.0 
h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0= +github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU= +github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U= +github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0 
h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/t-yuki/gocover-cobertura 
v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= +github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.5 
h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= +github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= +github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw= +github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/ykadowak/zerologlint v0.1.2 h1:Um4P5RMmelfjQqQJKtE8ZW+dLZrXrENeIzWWKw800U4= +github.com/ykadowak/zerologlint v0.1.2/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= +gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= +go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io 
v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.tmz.dev/musttag v0.7.0 h1:QfytzjTWGXZmChoX0L++7uQN+yRCPfyFm+whsM+lfGc= +go.tmz.dev/musttag v0.7.0/go.mod h1:oTFPvgOkJmp5kYL02S8+jrH0eLrBIl57rzWeA26zDEM= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 h1:J74nGeMgeFnYQJN59eFwh06jX/V8g0lB7LWpjSLxtgU= +golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools 
v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.11.1 h1:ojD5zOW8+7dOGzdnNgersm8aPfcDjhMp12UfG93NIMc= +golang.org/x/tools v0.11.1/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto 
v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= +honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= +k8s.io/api v0.0.0-20210217171935-8e2decd92398/go.mod h1:60tmSUpHxGPFerNHbo/ayI2lKxvtrhbxFyXuEIWJd78= +k8s.io/apimachinery v0.0.0-20210217011835-527a61b4dffe/go.mod h1:Z7ps/g0rjlTeMstYrMOUttJfT2Gg34DEaG/f2PYLCWY= +k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= +k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/client-go v0.0.0-20210217172142-7279fc64d847 h1:d+LBRNY3c/KGp7lDblRlUJkayx4Vla7WUTIazoGMdYo= +k8s.io/client-go v0.0.0-20210217172142-7279fc64d847/go.mod h1:q0EaghmVye2uui19vxSZ2NG6ssgUWgjudO6vrwXneSI= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI= +k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= +mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint 
v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w= +mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/flytectl/boilerplate/flyte/golang_support_tools/tools.go b/flytectl/boilerplate/flyte/golang_support_tools/tools.go new file mode 100644 index 0000000000..a78b61162a --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_support_tools/tools.go @@ -0,0 +1,12 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "github.com/EngHabu/mockery/cmd/mockery" + _ "github.com/alvaroloes/enumer" + _ "github.com/flyteorg/flyte/flytestdlib/cli/pflags" + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + _ "github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc" +) diff --git a/flytectl/boilerplate/flyte/golang_test_targets/Makefile b/flytectl/boilerplate/flyte/golang_test_targets/Makefile new file mode 100644 index 0000000000..aad7f21986 --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_test_targets/Makefile @@ -0,0 +1,56 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
+# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + + +.PHONY: download_tooling +download_tooling: #download dependencies (including test deps) for the package + @boilerplate/flyte/golang_test_targets/download_tooling.sh + +.PHONY: generate +generate: download_tooling #generate go code + @boilerplate/flyte/golang_test_targets/go-gen.sh + +.PHONY: lint +lint: download_tooling #lints the package for common code smells + GL_DEBUG=linters_output,env golangci-lint run --deadline=5m --exclude deprecated -v + +# If code is failing goimports linter, this will fix. +# skips 'vendor' +.PHONY: goimports +goimports: + @boilerplate/flyte/golang_test_targets/goimports + +.PHONY: mod_download +mod_download: #download dependencies (including test deps) for the package + go mod download + +.PHONY: install +install: download_tooling mod_download + +.PHONY: show +show: + go list -m all + +.PHONY: test_unit +test_unit: + go test -cover ./... -race + +.PHONY: test_benchmark +test_benchmark: + go test -bench . ./... + +.PHONY: test_unit_cover +test_unit_cover: + go test ./... -coverprofile /tmp/cover.out -covermode=count + go tool cover -func /tmp/cover.out + +.PHONY: test_unit_visual +test_unit_visual: + go test ./... -coverprofile /tmp/cover.out -covermode=count + go tool cover -html=/tmp/cover.out + +.PHONY: test_unit_codecov +test_unit_codecov: + go test ./... -race -coverprofile=coverage.txt -covermode=atomic diff --git a/flytectl/boilerplate/flyte/golang_test_targets/Readme.rst b/flytectl/boilerplate/flyte/golang_test_targets/Readme.rst new file mode 100644 index 0000000000..f9d890fdd7 --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_test_targets/Readme.rst @@ -0,0 +1,31 @@ +Golang Test Targets +~~~~~~~~~~~~~~~~~~~ + +Provides an ``install`` make target that uses ``go mod`` to install golang dependencies. 
+ +Provides a ``lint`` make target that uses golangci to lint your code. + +Provides a ``test_unit`` target for unit tests. + +Provides a ``test_unit_cover`` target for analysing coverage of unit tests, which will output the coverage of each function and total statement coverage. + +Provides a ``test_unit_visual`` target for visualizing coverage of unit tests through an interactive html code heat map. + +Provides a ``test_benchmark`` target for benchmark tests. + +**To Enable:** + +Add ``flyteorg/golang_test_targets`` to your ``boilerplate/update.cfg`` file. + +Make sure you're using ``go mod`` for dependency management. + +Provide a ``.golangci`` configuration (the lint target requires it). + +Add ``include boilerplate/flyte/golang_test_targets/Makefile`` in your main ``Makefile`` _after_ your REPOSITORY environment variable + +:: + + REPOSITORY= + include boilerplate/flyte/golang_test_targets/Makefile + +(this ensures the extra make targets get included in your main Makefile) diff --git a/flytectl/boilerplate/flyte/golang_test_targets/download_tooling.sh b/flytectl/boilerplate/flyte/golang_test_targets/download_tooling.sh new file mode 100755 index 0000000000..c7e5577ef3 --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_test_targets/download_tooling.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Everything in this file needs to be installed outside of current module +# The reason we cannot turn off module entirely and install is that we need the replace statement in go.mod +# because we are installing a mockery fork. Turning it off would result installing the original not the fork. +# We also want to version all the other tools. We also want to be able to run go mod tidy without removing the version +# pins. To facilitate this, we're maintaining two sets of go.mod/sum files - the second one only for tooling. This is +# the same approach that go 1.14 will take as well. 
+# See: +# https://github.com/flyteorg/flyte/issues/129 +# https://github.com/golang/go/issues/30515 for some background context +# https://github.com/go-modules-by-example/index/blob/5ec250b4b78114a55001bd7c9cb88f6e07270ea5/010_tools/README.md + +set -e + +# List of tools to go get +# In the format of ":" or ":" if no cli +tools=( + "github.com/EngHabu/mockery/cmd/mockery" + "github.com/flyteorg/flytestdlib/cli/pflags@latest" + "github.com/golangci/golangci-lint/cmd/golangci-lint" + "github.com/alvaroloes/enumer" + "github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc" +) + +tmp_dir=$(mktemp -d -t gotooling-XXX) +echo "Using temp directory ${tmp_dir}" +cp -R boilerplate/flyte/golang_support_tools/* $tmp_dir +pushd "$tmp_dir" + +for tool in "${tools[@]}" +do + echo "Installing ${tool}" + GO111MODULE=on go install $tool +done + +popd diff --git a/flytectl/boilerplate/flyte/golang_test_targets/go-gen.sh b/flytectl/boilerplate/flyte/golang_test_targets/go-gen.sh new file mode 100755 index 0000000000..54bd6af61b --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_test_targets/go-gen.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -ex + +echo "Running go generate" +go generate ./... + +# This section is used by GitHub workflow to ensure that the generation step was run +if [ -n "$DELTA_CHECK" ]; then + DIRTY=$(git status --porcelain) + if [ -n "$DIRTY" ]; then + echo "FAILED: Go code updated without commiting generated code." + echo "Ensure make generate has run and all changes are committed." + DIFF=$(git diff) + echo "diff detected: $DIFF" + DIFF=$(git diff --name-only) + echo "files different: $DIFF" + exit 1 + else + echo "SUCCESS: Generated code is up to date." 
+ fi +fi diff --git a/flytectl/boilerplate/flyte/golang_test_targets/goimports b/flytectl/boilerplate/flyte/golang_test_targets/goimports new file mode 100755 index 0000000000..af1829036c --- /dev/null +++ b/flytectl/boilerplate/flyte/golang_test_targets/goimports @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +goimports -w $(find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/client/*" -not -path "./boilerplate/*") diff --git a/flytectl/boilerplate/flyte/golangci_file/.golangci.yml b/flytectl/boilerplate/flyte/golangci_file/.golangci.yml new file mode 100644 index 0000000000..5d53f35295 --- /dev/null +++ b/flytectl/boilerplate/flyte/golangci_file/.golangci.yml @@ -0,0 +1,30 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +run: + skip-dirs: + - pkg/client + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gas + - goconst + - goimports + - golint + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - staticcheck + - structcheck + - typecheck + - unconvert + - unparam + - unused + - varcheck diff --git a/flytectl/boilerplate/flyte/golangci_file/Readme.rst b/flytectl/boilerplate/flyte/golangci_file/Readme.rst new file mode 100644 index 0000000000..e4cbd18b96 --- /dev/null +++ b/flytectl/boilerplate/flyte/golangci_file/Readme.rst @@ -0,0 +1,8 @@ +GolangCI File +~~~~~~~~~~~~~ + +Provides a ``.golangci`` file with the linters we've agreed upon. + +**To Enable:** + +Add ``flyteorg/golangci_file`` to your ``boilerplate/update.cfg`` file. 
diff --git a/flytectl/boilerplate/flyte/golangci_file/update.sh b/flytectl/boilerplate/flyte/golangci_file/update.sh new file mode 100755 index 0000000000..ab2f85c680 --- /dev/null +++ b/flytectl/boilerplate/flyte/golangci_file/update.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +# Clone the .golangci file +echo " - copying ${DIR}/.golangci to the root directory." +cp ${DIR}/.golangci.yml ${DIR}/../../../.golangci.yml diff --git a/flytectl/boilerplate/flyte/precommit/Makefile b/flytectl/boilerplate/flyte/precommit/Makefile new file mode 100644 index 0000000000..3c6f17d6b2 --- /dev/null +++ b/flytectl/boilerplate/flyte/precommit/Makefile @@ -0,0 +1,9 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + + +.PHONY: setup-precommit +setup-precommit: #setup the precommit + @boilerplate/flyte/precommit/update.sh diff --git a/flytectl/boilerplate/flyte/precommit/hooks/pre-push b/flytectl/boilerplate/flyte/precommit/hooks/pre-push new file mode 100755 index 0000000000..f161cfe856 --- /dev/null +++ b/flytectl/boilerplate/flyte/precommit/hooks/pre-push @@ -0,0 +1,41 @@ +DUMMY_SHA=0000000000000000000000000000000000000000 + +echo "Running pre-push check; to skip this step use 'push --no-verify'" + +while read LOCAL_REF LOCAL_SHA REMOTE_REF REMOTE_SHA +do + if [ "$LOCAL_SHA" = $DUMMY_SHA ] + then + # Branch deleted. Do nothing. + exit 0 + else + if [ "$REMOTE_SHA" = $DUMMY_SHA ] + then + # New branch. 
Verify the last commit, since this is very likely where the new code is + # (though there is no way to know for sure). In the extremely uncommon case in which someone + # pushes more than 1 new commit to a branch, CI will enforce full checking. + RANGE="$LOCAL_SHA~1..$LOCAL_SHA" + else + # Updating branch. Verify new commits. + RANGE="$REMOTE_SHA..$LOCAL_SHA" + fi + + # Verify DCO signoff. We do this before the format checker, since it has + # some probability of failing spuriously, while this check never should. + # + # In general, we can't assume that the commits are signed off by author + # pushing, so we settle for just checking that there is a signoff at all. + SIGNED_OFF=$(git rev-list --no-merges --grep "^Signed-off-by: " "$RANGE") + NOT_SIGNED_OFF=$(git rev-list --no-merges "$RANGE" | grep -Fxv "$SIGNED_OFF") + if [ -n "$NOT_SIGNED_OFF" ] + then + echo >&2 "ERROR: The following commits do not have DCO signoff:" + while read -r commit; do + echo " $(git log --pretty=oneline --abbrev-commit -n 1 $commit)" + done <<< "$NOT_SIGNED_OFF" + exit 1 + fi + fi +done + +exit 0 diff --git a/flytectl/boilerplate/flyte/precommit/hooks/prepare-commit-msg b/flytectl/boilerplate/flyte/precommit/hooks/prepare-commit-msg new file mode 100755 index 0000000000..8148d104b8 --- /dev/null +++ b/flytectl/boilerplate/flyte/precommit/hooks/prepare-commit-msg @@ -0,0 +1,16 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst +# $ ln -s ../../support/hooks/prepare-commit-msg .git/hooks/prepare-commit-msg + +COMMIT_MESSAGE_FILE="$1" +AUTHOR=$(git var GIT_AUTHOR_IDENT) +SIGNOFF=$(echo $AUTHOR | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') + +# Check for DCO signoff message. If one doesn't exist, append one and then warn +# the user that you did so. +if ! 
$(grep -qs "^$SIGNOFF" "$COMMIT_MESSAGE_FILE") ; then + echo "\n$SIGNOFF" >> "$COMMIT_MESSAGE_FILE" + echo "Appended the following signoff to the end of the commit message:\n $SIGNOFF\n" +fi diff --git a/flytectl/boilerplate/flyte/precommit/update.sh b/flytectl/boilerplate/flyte/precommit/update.sh new file mode 100755 index 0000000000..971c8386c1 --- /dev/null +++ b/flytectl/boilerplate/flyte/precommit/update.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +set -e + +# Helper script for Automatically add DCO signoff with commit hooks +# Taken from Envoy https://gitlab.cncf.ci/envoyproxy/envoy +if [ ! "$PWD" == "$(git rev-parse --show-toplevel)" ]; then + cat >&2 <<__EOF__ +ERROR: this script must be run at the root of the envoy source tree +__EOF__ + exit 1 +fi + +# Helper functions that calculate `abspath` and `relpath`. Taken from Mesos +# commit 82b040a60561cf94dec3197ea88ae15e57bcaa97, which also carries the Apache +# V2 license, and has deployed this code successfully for some time. +abspath() { + cd "$(dirname "${1}")" + echo "${PWD}"/"$(basename "${1}")" + cd "${OLDPWD}" +} +relpath() { + local FROM TO UP + FROM="$(abspath "${1%/}")" TO="$(abspath "${2%/}"/)" + while test "${TO}" = "${TO#"${FROM}"/}" \ + -a "${TO}" != "${FROM}"; do + FROM="${FROM%/*}" UP="../${UP}" + done + TO="${UP%/}${TO#${FROM}}" + echo "${TO:-.}" +} + +# Try to find the `.git` directory, even if it's not in Flyte project root (as +# it wouldn't be if, say, this were in a submodule). The "blessed" but fairly +# new way to do this is to use `--git-common-dir`. +DOT_GIT_DIR=$(git rev-parse --git-common-dir) +if test ! -d "${DOT_GIT_DIR}"; then + # If `--git-common-dir` is not available, fall back to older way of doing it. 
+ DOT_GIT_DIR=$(git rev-parse --git-dir) +fi + +mkdir -p ${DOT_GIT_DIR}/hooks + +HOOKS_DIR="${DOT_GIT_DIR}/hooks" +HOOKS_DIR_RELPATH=$(relpath "${HOOKS_DIR}" "${PWD}") + +if [ ! -e "${HOOKS_DIR}/prepare-commit-msg" ]; then + echo "Installing hook 'prepare-commit-msg'" + ln -s "${HOOKS_DIR_RELPATH}/boilerplate/flyte/precommit/hooks/prepare-commit-msg" "${HOOKS_DIR}/prepare-commit-msg" +fi + +if [ ! -e "${HOOKS_DIR}/pre-push" ]; then + echo "Installing hook 'pre-push'" + ln -s "${HOOKS_DIR_RELPATH}/boilerplate/flyte/precommit/hooks/pre-push" "${HOOKS_DIR}/pre-push" +fi diff --git a/flytectl/boilerplate/flyte/pull_request_template/Readme.rst b/flytectl/boilerplate/flyte/pull_request_template/Readme.rst new file mode 100644 index 0000000000..ee54437252 --- /dev/null +++ b/flytectl/boilerplate/flyte/pull_request_template/Readme.rst @@ -0,0 +1,8 @@ +Pull Request Template +~~~~~~~~~~~~~~~~~~~~~ + +Provides a Pull Request template. + +**To Enable:** + +Add ``flyteorg/golang_test_targets`` to your ``boilerplate/update.cfg`` file. diff --git a/flytectl/boilerplate/flyte/pull_request_template/pull_request_template.md b/flytectl/boilerplate/flyte/pull_request_template/pull_request_template.md new file mode 100644 index 0000000000..9cdab99b46 --- /dev/null +++ b/flytectl/boilerplate/flyte/pull_request_template/pull_request_template.md @@ -0,0 +1,35 @@ +## _Read then delete this section_ + +_- Make sure to use a concise title for the pull-request._ + +_- Use #patch, #minor or #major in the pull-request title to bump the corresponding version. Otherwise, the patch version +will be bumped. [More details](https://github.com/marketplace/actions/github-tag-bump)_ + +# TL;DR +_Please replace this text with a description of what this PR accomplishes._ + +## Type + - [ ] Bug Fix + - [ ] Feature + - [ ] Plugin + +## Are all requirements met? 
+ + - [ ] Code completed + - [ ] Smoke tested + - [ ] Unit tests added + - [ ] Code documentation added + - [ ] Any pending items have an associated Issue + +## Complete description + _How did you fix the bug, make the feature etc. Link to any design docs etc_ + +## Tracking Issue +_Remove the '*fixes*' keyword if there will be multiple PRs to fix the linked issue_ + +fixes https://github.com/flyteorg/flyte/issues/ + +## Follow-up issue +_NA_ +OR +_https://github.com/flyteorg/flyte/issues/_ diff --git a/flytectl/boilerplate/flyte/pull_request_template/update.sh b/flytectl/boilerplate/flyte/pull_request_template/update.sh new file mode 100755 index 0000000000..051e9dbce0 --- /dev/null +++ b/flytectl/boilerplate/flyte/pull_request_template/update.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +cp ${DIR}/pull_request_template.md ${DIR}/../../../pull_request_template.md diff --git a/flytectl/boilerplate/update.cfg b/flytectl/boilerplate/update.cfg new file mode 100644 index 0000000000..8920ca4162 --- /dev/null +++ b/flytectl/boilerplate/update.cfg @@ -0,0 +1,6 @@ +flyte/golang_test_targets +flyte/golangci_file +flyte/golang_support_tools +flyte/pull_request_template +flyte/precommit +flyte/code_of_conduct diff --git a/flytectl/boilerplate/update.sh b/flytectl/boilerplate/update.sh new file mode 100755 index 0000000000..73de4dc91c --- /dev/null +++ b/flytectl/boilerplate/update.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
+# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +OUT="$(mktemp -d)" +trap 'rm -fr $OUT' EXIT + +git clone https://github.com/flyteorg/boilerplate.git "${OUT}" + +echo "Updating the update.sh script." +cp "${OUT}/boilerplate/update.sh" "${DIR}/update.sh" + +CONFIG_FILE="${DIR}/update.cfg" +README="https://github.com/flyteorg/boilerplate/blob/master/Readme.rst" + +if [ ! -f "$CONFIG_FILE" ]; then + echo "$CONFIG_FILE not found." + echo "This file is required in order to select which features to include." + echo "See $README for more details." + exit 1 +fi + +if [ -z "$REPOSITORY" ]; then + echo "$REPOSITORY is required to run this script" + echo "See $README for more details." + exit 1 +fi + +while read -r directory junk; do + # Skip comment lines (which can have leading whitespace) + if [[ "$directory" == '#'* ]]; then + continue + fi + # Skip blank or whitespace-only lines + if [[ "$directory" == "" ]]; then + continue + fi + # Lines like + # valid/path other_junk + # are not acceptable, unless `other_junk` is a comment + if [[ "$junk" != "" ]] && [[ "$junk" != '#'* ]]; then + echo "Invalid config! Only one directory is allowed per line. Found '$junk'" + exit 1 + fi + + dir_path="${OUT}/boilerplate/${directory}" + # Make sure the directory exists + if ! [[ -d "$dir_path" ]]; then + echo "Invalid boilerplate directory: '$directory'" + exit 1 + fi + + echo "***********************************************************************************" + echo "$directory is configured in update.cfg." + echo "-----------------------------------------------------------------------------------" + echo "syncing files from source." 
+ rm -rf "${DIR:?}/${directory}" + mkdir -p "$(dirname "${DIR}"/"${directory}")" + cp -r "$dir_path" "${DIR}/${directory}" + if [ -f "${DIR}/${directory}/update.sh" ]; then + echo "executing ${DIR}/${directory}/update.sh" + "${DIR}/${directory}/update.sh" + fi + echo "***********************************************************************************" + echo "" +done < "$CONFIG_FILE" diff --git a/flytectl/clierrors/errors.go b/flytectl/clierrors/errors.go new file mode 100644 index 0000000000..48fecd3f2f --- /dev/null +++ b/flytectl/clierrors/errors.go @@ -0,0 +1,26 @@ +package clierrors + +var ( + ErrInvalidStateUpdate = "invalid state passed. Specify either activate or archive\n" + ErrInvalidBothStateUpdate = "invalid state passed. Specify either activate or deactivate\n" + + ErrProjectNotPassed = "project id wasn't passed\n" // #nosec + ErrProjectIDBothPassed = "both project and id are passed\n" + ErrProjectNameNotPassed = "project name is a required flag" + ErrFailedProjectUpdate = "Project %v failed to update due to %w\n" + + ErrLPNotPassed = "launch plan name wasn't passed\n" + ErrLPVersionNotPassed = "launch plan version wasn't passed\n" //nolint + ErrFailedLPUpdate = "launch plan %v failed to update due to %w\n" + + ErrExecutionNotPassed = "execution name wasn't passed\n" + ErrFailedExecutionUpdate = "execution %v failed to update due to %v\n" + + ErrWorkflowNotPassed = "workflow name wasn't passed\n" + ErrFailedWorkflowUpdate = "workflow %v failed to update to due to %v\n" + + ErrTaskNotPassed = "task name wasn't passed\n" // #nosec + ErrFailedTaskUpdate = "task %v failed to update to due to %v\n" + + ErrSandboxExists = "sandbox already exists!\n" +) diff --git a/flytectl/cmd/compile/compile.go b/flytectl/cmd/compile/compile.go new file mode 100644 index 0000000000..035ea786e5 --- /dev/null +++ b/flytectl/cmd/compile/compile.go @@ -0,0 +1,147 @@ +package compile + +import ( + "context" + "fmt" + "io/ioutil" + "os" + + 
"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytepropeller/pkg/compiler" + "github.com/flyteorg/flyte/flytepropeller/pkg/compiler/common" + config "github.com/flyteorg/flytectl/cmd/config/subcommand/compile" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/cmd/register" +) + +// Utility function for compiling a list of Tasks +func compileTasks(tasks []*core.TaskTemplate) ([]*core.CompiledTask, error) { + res := make([]*core.CompiledTask, 0, len(tasks)) + for _, task := range tasks { + compiledTask, err := compiler.CompileTask(task) + if err != nil { + return nil, err + } + res = append(res, compiledTask) + } + return res, nil +} + +/* +Utility to compile a packaged workflow locally. +compilation is done locally so no flyte cluster is required. +*/ +func compileFromPackage(packagePath string) error { + args := []string{packagePath} + fileList, tmpDir, err := register.GetSerializeOutputFiles(context.Background(), args, true) + defer os.RemoveAll(tmpDir) + if err != nil { + fmt.Println("Error found while extracting package..") + return err + } + fmt.Println("Successfully extracted package...") + fmt.Println("Processing Protobuf files...") + workflows := make(map[string]*admin.WorkflowSpec) + plans := make(map[string]*admin.LaunchPlan) + tasks := []*admin.TaskSpec{} + + for _, pbFilePath := range fileList { + rawTsk, err := ioutil.ReadFile(pbFilePath) + if err != nil { + fmt.Printf("error unmarshalling task..") + return err + } + spec, err := register.UnMarshalContents(context.Background(), rawTsk, pbFilePath) + if err != nil { + return err + } + + switch v := spec.(type) { + case *admin.TaskSpec: + tasks = append(tasks, v) + case *admin.WorkflowSpec: + workflows[v.Template.Id.Name] = v + case *admin.LaunchPlan: + plans[v.Id.Name] = v + } + } + + // compile tasks + taskTemplates := []*core.TaskTemplate{} + for _, task := range 
tasks { + taskTemplates = append(taskTemplates, task.Template) + } + + fmt.Println("\nCompiling tasks...") + compiledTasks, err := compileTasks(taskTemplates) + if err != nil { + fmt.Println("Error while compiling tasks...") + return err + } + + // compile workflows + for wfName, workflow := range workflows { + + fmt.Println("\nCompiling workflow:", wfName) + plan := plans[wfName] + + _, err := compiler.CompileWorkflow(workflow.Template, + workflow.SubWorkflows, + compiledTasks, + []common.InterfaceProvider{compiler.NewLaunchPlanInterfaceProvider(*plan)}) + if err != nil { + fmt.Println(":( Error Compiling workflow:", wfName) + return err + } + + } + + fmt.Println("All Workflows compiled successfully!") + fmt.Println("\nSummary:") + fmt.Println(len(workflows), " workflows found in package") + fmt.Println(len(tasks), " Tasks found in package") + fmt.Println(len(plans), " Launch plans found in package") + return nil +} + +const ( + compileShort = `Validate flyte packages without registration needed.` + compileLong = ` +Validate workflows by compiling flyte's serialized protobuf files (task, workflows and launch plans). This is useful for testing workflows and tasks without neededing to talk with a flyte cluster. + +:: + + flytectl compile --file my-flyte-package.tgz + +:: + + flytectl compile --file /home/user/dags/my-flyte-package.tgz + +.. note:: + Input file is a path to a tgz. This file is generated by either pyflyte or jflyte. tgz file contains protobuf files describing workflows, tasks and launch plans. 
+ +` +) + +func compile(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + packageFilePath := config.DefaultCompileConfig.File + if packageFilePath == "" { + return fmt.Errorf("path to package tgz's file is a required flag") + } + return compileFromPackage(packageFilePath) +} + +func CreateCompileCommand() map[string]cmdCore.CommandEntry { + compileResourcesFuncs := map[string]cmdCore.CommandEntry{ + "compile": { + Short: compileShort, + Long: compileLong, + CmdFunc: compile, + PFlagProvider: config.DefaultCompileConfig, + ProjectDomainNotRequired: true, + DisableFlyteClient: true, + }, + } + return compileResourcesFuncs +} diff --git a/flytectl/cmd/compile/compile_test.go b/flytectl/cmd/compile/compile_test.go new file mode 100644 index 0000000000..83c555964c --- /dev/null +++ b/flytectl/cmd/compile/compile_test.go @@ -0,0 +1,71 @@ +package compile + +import ( + "context" + "testing" + + config "github.com/flyteorg/flytectl/cmd/config/subcommand/compile" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + u "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" +) + +func TestCompileCommand(t *testing.T) { + rootCmd := &cobra.Command{ + Long: "Flytectl is a CLI tool written in Go to interact with the FlyteAdmin service.", + Short: "Flytectl CLI tool", + Use: "flytectl", + DisableAutoGenTag: true, + } + compileCommand := CreateCompileCommand() + cmdCore.AddCommands(rootCmd, compileCommand) + cmdNouns := rootCmd.Commands() + assert.Equal(t, cmdNouns[0].Use, "compile") + assert.Equal(t, cmdNouns[0].Flags().Lookup("file").Name, "file") + // check shorthand + assert.Equal(t, cmdNouns[0].Short, compileShort) + + // compiling via cobra command + compileCfg := config.DefaultCompileConfig + compileCfg.File = "testdata/valid-package.tgz" + var setup = u.Setup + s := setup() + compileCmd := CreateCompileCommand()["compile"] + err := compileCmd.CmdFunc(context.Background(), []string{}, s.CmdCtx) 
+ assert.Nil(t, err, "compiling via cmd returns err") + + // calling command with empty file flag + compileCfg = config.DefaultCompileConfig + compileCfg.File = "" + err = compileCmd.CmdFunc(context.Background(), []string{}, s.CmdCtx) + assert.NotNil(t, err, "calling compile with Empty file flag does not error") +} + +func TestCompilePackage(t *testing.T) { + // valid package contains two workflows + // with three tasks + err := compileFromPackage("testdata/valid-package.tgz") + assert.Nil(t, err, "unable to compile a valid package") + + // invalid gzip header + err = compileFromPackage("testdata/invalid.tgz") + assert.NotNil(t, err, "compiling an invalid package returns no error") + + // invalid workflow, types do not match + err = compileFromPackage("testdata/bad-workflow-package.tgz") + assert.NotNil(t, err, "compilin an invalid workflow returns no error") + + // testing badly serialized task + err = compileFromPackage("testdata/invalidtask.tgz") + assert.NotNil(t, err, "unable to handle invalid task") + + // testing badly serialized launchplan + err = compileFromPackage("testdata/invalidlaunchplan.tgz") + assert.NotNil(t, err, "unable to handle invalid launchplan") + + // testing badly serialized workflow + err = compileFromPackage("testdata/invalidworkflow.tgz") + assert.NotNil(t, err, "unable to handle invalid workflow") + +} diff --git a/flytectl/cmd/compile/testdata/bad-workflow-package.tgz b/flytectl/cmd/compile/testdata/bad-workflow-package.tgz new file mode 100644 index 0000000000..183be1bae5 Binary files /dev/null and b/flytectl/cmd/compile/testdata/bad-workflow-package.tgz differ diff --git a/flytectl/cmd/compile/testdata/invalid.tgz b/flytectl/cmd/compile/testdata/invalid.tgz new file mode 100644 index 0000000000..3f37575e6a --- /dev/null +++ b/flytectl/cmd/compile/testdata/invalid.tgz @@ -0,0 +1 @@ +invalid tgz file diff --git a/flytectl/cmd/compile/testdata/invalidlaunchplan.tgz b/flytectl/cmd/compile/testdata/invalidlaunchplan.tgz new file mode 
100644 index 0000000000..650a0d5a92 Binary files /dev/null and b/flytectl/cmd/compile/testdata/invalidlaunchplan.tgz differ diff --git a/flytectl/cmd/compile/testdata/invalidtask.tgz b/flytectl/cmd/compile/testdata/invalidtask.tgz new file mode 100644 index 0000000000..cb129ab160 Binary files /dev/null and b/flytectl/cmd/compile/testdata/invalidtask.tgz differ diff --git a/flytectl/cmd/compile/testdata/invalidworkflow.tgz b/flytectl/cmd/compile/testdata/invalidworkflow.tgz new file mode 100644 index 0000000000..4192456119 Binary files /dev/null and b/flytectl/cmd/compile/testdata/invalidworkflow.tgz differ diff --git a/flytectl/cmd/compile/testdata/valid-package.tgz b/flytectl/cmd/compile/testdata/valid-package.tgz new file mode 100644 index 0000000000..e877b96630 Binary files /dev/null and b/flytectl/cmd/compile/testdata/valid-package.tgz differ diff --git a/flytectl/cmd/completion.go b/flytectl/cmd/completion.go new file mode 100644 index 0000000000..b404cb57dd --- /dev/null +++ b/flytectl/cmd/completion.go @@ -0,0 +1,107 @@ +/* +Copyright ยฉ 2021 NAME HERE + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package cmd + +import ( + "os" + + "github.com/spf13/cobra" +) + +// completionCmd represents the completion command +var completionCmd = &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generates completion script.", + Long: `To load completion, run the following commands in accordance with the shell you are using: + +- Bash + :: + + $ source <(flytectl completion bash) + + To load completions for each session: + + - Linux + + :: + + $ flytectl completion bash > /etc/bash_completion.d/flytectl + + - macOS + + :: + + $ flytectl completion bash > /usr/local/etc/bash_completion.d/flytectl + +- Zsh + If shell completion is not already enabled in your environment, enable it: + + :: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + Once enabled, execute once: + + :: + + $ flytectl completion zsh > "${fpath[1]}/_flytectl" + + .. note:: + Start a new shell for this setup to take effect. + +- fish + + :: + + $ flytectl completion fish | source + + To load completions for each session, run: + + :: + + $ flytectl completion fish > ~/.config/fish/completions/flytectl.fish + +- PowerShell + + :: + + PS> flytectl completion powershell | Out-String | Invoke-Expression + + To load completions for each session, run: + + :: + + PS> flytectl completion powershell > flytectl.ps1 + + and source this file from your PowerShell profile. 
+`, + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.ExactValidArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + switch args[0] { + case "bash": + return cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + return cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + return cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + } + return nil + }, +} diff --git a/flytectl/cmd/completion_test.go b/flytectl/cmd/completion_test.go new file mode 100644 index 0000000000..99aa21a851 --- /dev/null +++ b/flytectl/cmd/completion_test.go @@ -0,0 +1,26 @@ +package cmd + +import ( + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" +) + +func TestCompletionCmdIntegration(t *testing.T) { + rootCmd := &cobra.Command{ + Long: "Flytectl is a CLI tool written in Go to interact with the FlyteAdmin service", + Short: "Flytectl CLI tool", + Use: "flytectl", + DisableAutoGenTag: true, + } + + err := completionCmd.RunE(rootCmd, []string{"bash"}) + assert.Nil(t, err) + err = completionCmd.RunE(rootCmd, []string{"zsh"}) + assert.Nil(t, err) + err = completionCmd.RunE(rootCmd, []string{"fish"}) + assert.Nil(t, err) + err = completionCmd.RunE(rootCmd, []string{"powershell"}) + assert.Nil(t, err) +} diff --git a/flytectl/cmd/config/config.go b/flytectl/cmd/config/config.go new file mode 100644 index 0000000000..76b4b7c880 --- /dev/null +++ b/flytectl/cmd/config/config.go @@ -0,0 +1,45 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/flyteorg/flyte/flytestdlib/config" + + "github.com/flyteorg/flytectl/pkg/printer" +) + +var ( + defaultConfig = &Config{ + Output: printer.OutputFormatTABLE.String(), + } + + section = config.MustRegisterSection("root", defaultConfig) +) + +// Config hold configration for flytectl flag +type Config struct { + Project string `json:"project" pflag:",Specifies the 
project to work on."` + Domain string `json:"domain" pflag:",Specifies the domain to work on."` + Output string `json:"output" pflag:",Specifies the output type."` + Interactive bool `json:"interactive" pflag:",Set this to trigger bubbletea interface."` +} + +// OutputFormat will return output formate +func (cfg Config) OutputFormat() (printer.OutputFormat, error) { + return printer.OutputFormatString(strings.ToUpper(cfg.Output)) +} + +// MustOutputFormat will validate the supported output formate and return output formate +func (cfg Config) MustOutputFormat() printer.OutputFormat { + f, err := cfg.OutputFormat() + if err != nil { + panic(fmt.Sprintf("unsupported output format [%s], supported types %s", cfg.Output, printer.OutputFormats())) + } + return f +} + +// GetConfig will return the config +func GetConfig() *Config { + return section.GetConfig().(*Config) +} diff --git a/flytectl/cmd/config/config_test.go b/flytectl/cmd/config/config_test.go new file mode 100644 index 0000000000..f11ee07ae7 --- /dev/null +++ b/flytectl/cmd/config/config_test.go @@ -0,0 +1,32 @@ +package config + +import ( + "testing" + + "github.com/flyteorg/flytectl/pkg/printer" + "github.com/stretchr/testify/assert" +) + +func TestOutputFormat(t *testing.T) { + c := &Config{ + Output: "json", + } + result, err := c.OutputFormat() + assert.Nil(t, err) + assert.Equal(t, printer.OutputFormat(1), result) +} + +func TestInvalidOutputFormat(t *testing.T) { + c := &Config{ + Output: "test", + } + var result printer.OutputFormat + defer func() { + if r := recover(); r != nil { + assert.Equal(t, printer.OutputFormat(0), result) + assert.NotNil(t, r) + } + }() + result = c.MustOutputFormat() + +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/attrdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrdeleteconfig_flags.go new file mode 100755 index 0000000000..80c5873993 --- /dev/null +++ 
b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrdeleteconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package clusterresourceattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrDeleteConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrDeleteConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultDelConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultDelConfig.AttrFile, "attribute file name to be used for delete attribute for the resource type.") + cmdFlags.BoolVar(&DefaultDelConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultDelConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/attrdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrdeleteconfig_flags_test.go new file mode 100755 index 0000000000..fcf1f72f0c --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrdeleteconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package clusterresourceattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(val, result)) +} + +func testDecodeRaw_AttrDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(vStringSlice, result)) +} + +func TestAttrDeleteConfig_GetPFlagSet(t *testing.T) { + val := AttrDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrDeleteConfig_SetFlags(t *testing.T) { + actual := AttrDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/attrfetchconfig_flags.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrfetchconfig_flags.go new file mode 100755 index 0000000000..47dfb88623 --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrfetchconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package clusterresourceattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrFetchConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrFetchConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrFetchConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrFetchConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrFetchConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrFetchConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFetchConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultFetchConfig.AttrFile, "attribute file name to be used for generating attribute for the resource type.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/attrfetchconfig_flags_test.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrfetchconfig_flags_test.go new file mode 100755 index 0000000000..dbcbb814fb --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrfetchconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package clusterresourceattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrFetchConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrFetchConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrFetchConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrFetchConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrFetchConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrFetchConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrFetchConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrFetchConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrFetchConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(val, result)) +} + +func testDecodeRaw_AttrFetchConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(vStringSlice, result)) +} + +func TestAttrFetchConfig_GetPFlagSet(t *testing.T) { + val := AttrFetchConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrFetchConfig_SetFlags(t *testing.T) { + actual := AttrFetchConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/attrupdateconfig_flags.go 
b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrupdateconfig_flags.go new file mode 100755 index 0000000000..f0f8103f12 --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrupdateconfig_flags.go @@ -0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package clusterresourceattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrUpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrUpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrUpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrUpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrUpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrUpdateConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultUpdateConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultUpdateConfig.AttrFile, "attribute file name to be used for updating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultUpdateConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultUpdateConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultUpdateConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultUpdateConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/attrupdateconfig_flags_test.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrupdateconfig_flags_test.go new file mode 100755 index 0000000000..9396a2e9e4 --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/attrupdateconfig_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package clusterresourceattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrUpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrUpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrUpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(val, result)) +} + +func testDecodeRaw_AttrUpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(vStringSlice, result)) +} + +func TestAttrUpdateConfig_GetPFlagSet(t *testing.T) { + val := AttrUpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrUpdateConfig_SetFlags(t *testing.T) { + actual := AttrUpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/delete_config.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/delete_config.go new file mode 100644 index 0000000000..e3dab991bf --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/delete_config.go @@ -0,0 +1,11 @@ +package clusterresourceattribute + +//go:generate pflags AttrDeleteConfig --default-var DefaultDelConfig --bind-default-var + +// AttrDeleteConfig Matchable resource attributes configuration passed from command line +type AttrDeleteConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for delete attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any 
modifications."` +} + +var DefaultDelConfig = &AttrDeleteConfig{} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/fetch_config.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/fetch_config.go new file mode 100644 index 0000000000..88afae90ac --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/fetch_config.go @@ -0,0 +1,9 @@ +package clusterresourceattribute + +//go:generate pflags AttrFetchConfig --default-var DefaultFetchConfig --bind-default-var + +type AttrFetchConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for generating attribute for the resource type."` +} + +var DefaultFetchConfig = &AttrFetchConfig{} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/file_config.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/file_config.go new file mode 100644 index 0000000000..1e9543981d --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/file_config.go @@ -0,0 +1,47 @@ +package clusterresourceattribute + +import ( + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +// AttrFileConfig shadow Config for ClusterResourceAttributes. +// The shadow Config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. +// As the same structure is being used for both ProjectDomainAttribute/Workflowattribute +type AttrFileConfig struct { + Project string `json:"project"` + Domain string `json:"domain"` + Workflow string `json:"workflow,omitempty"` + *admin.ClusterResourceAttributes +} + +// Decorate decorator over ClusterResourceAttributes. +func (c AttrFileConfig) Decorate() *admin.MatchingAttributes { + return &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: c.ClusterResourceAttributes, + }, + } +} + +// UnDecorate to uncover ClusterResourceAttributes. 
+func (c *AttrFileConfig) UnDecorate(matchingAttribute *admin.MatchingAttributes) { + if matchingAttribute == nil { + return + } + c.ClusterResourceAttributes = matchingAttribute.GetClusterResourceAttributes() +} + +// GetProject from the AttrFileConfig +func (c AttrFileConfig) GetProject() string { + return c.Project +} + +// GetDomain from the AttrFileConfig +func (c AttrFileConfig) GetDomain() string { + return c.Domain +} + +// GetWorkflow from the AttrFileConfig +func (c AttrFileConfig) GetWorkflow() string { + return c.Workflow +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/file_config_test.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/file_config_test.go new file mode 100644 index 0000000000..92b791cf0a --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/file_config_test.go @@ -0,0 +1,46 @@ +package clusterresourceattribute + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" +) + +func TestFileConfig(t *testing.T) { + clusterAttrFileConfig := AttrFileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + ClusterResourceAttributes: &admin.ClusterResourceAttributes{ + Attributes: map[string]string{"foo": "bar"}, + }, + } + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: clusterAttrFileConfig.ClusterResourceAttributes, + }, + } + t.Run("decorate", func(t *testing.T) { + assert.Equal(t, matchingAttr, clusterAttrFileConfig.Decorate()) + }) + + t.Run("decorate", func(t *testing.T) { + clusterAttrFileConfigNew := AttrFileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + } + clusterAttrFileConfigNew.UnDecorate(matchingAttr) + assert.Equal(t, clusterAttrFileConfig, clusterAttrFileConfigNew) + }) + t.Run("get project domain workflow", func(t *testing.T) { + clusterAttrFileConfigNew := AttrFileConfig{ + Project: 
"dummyProject", + Domain: "dummyDomain", + Workflow: "workflow", + } + assert.Equal(t, "dummyProject", clusterAttrFileConfigNew.GetProject()) + assert.Equal(t, "dummyDomain", clusterAttrFileConfigNew.GetDomain()) + assert.Equal(t, "workflow", clusterAttrFileConfigNew.GetWorkflow()) + }) +} diff --git a/flytectl/cmd/config/subcommand/clusterresourceattribute/update_config.go b/flytectl/cmd/config/subcommand/clusterresourceattribute/update_config.go new file mode 100644 index 0000000000..9b4534eed7 --- /dev/null +++ b/flytectl/cmd/config/subcommand/clusterresourceattribute/update_config.go @@ -0,0 +1,12 @@ +package clusterresourceattribute + +//go:generate pflags AttrUpdateConfig --default-var DefaultUpdateConfig --bind-default-var + +// AttrUpdateConfig Matchable resource attributes configuration passed from command line +type AttrUpdateConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for updating attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} + +var DefaultUpdateConfig = &AttrUpdateConfig{} diff --git a/flytectl/cmd/config/subcommand/compile/compile_config.go b/flytectl/cmd/config/subcommand/compile/compile_config.go new file mode 100644 index 0000000000..6fe24bca41 --- /dev/null +++ b/flytectl/cmd/config/subcommand/compile/compile_config.go @@ -0,0 +1,11 @@ +package compile + +//go:generate pflags Config --default-var DefaultCompileConfig --bind-default-var +var ( + DefaultCompileConfig = &Config{} +) + +// Config stores the flags required by compile command +type Config struct { + File string `json:"file" pflag:",Path to a flyte package file. 
Flyte packages are tgz files generated by pyflyte or jflyte."` +} diff --git a/flytectl/cmd/config/subcommand/compile/config_flags.go b/flytectl/cmd/config/subcommand/compile/config_flags.go new file mode 100755 index 0000000000..4e826f1b67 --- /dev/null +++ b/flytectl/cmd/config/subcommand/compile/config_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package compile + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultCompileConfig.File, fmt.Sprintf("%v%v", prefix, "file"), DefaultCompileConfig.File, "Path to a flyte package file. 
Flyte packages are tgz files generated by pyflyte or jflyte.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/compile/config_flags_test.go b/flytectl/cmd/config/subcommand/compile/config_flags_test.go new file mode 100755 index 0000000000..be3845ec1a --- /dev/null +++ b/flytectl/cmd/config/subcommand/compile/config_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package compile + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_file", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("file", testValue) + if vString, err := cmdFlags.GetString("file"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.File) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/config/config_flags.go b/flytectl/cmd/config/subcommand/config/config_flags.go new file mode 100755 index 0000000000..c3d727a3e2 --- /dev/null +++ b/flytectl/cmd/config/subcommand/config/config_flags.go @@ -0,0 +1,58 @@ 
+// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package config + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.Console, fmt.Sprintf("%v%v", prefix, "console"), DefaultConfig.Console, "Endpoint of console, if different than flyte admin") + cmdFlags.StringVar(&DefaultConfig.Host, fmt.Sprintf("%v%v", prefix, "host"), DefaultConfig.Host, "Endpoint of flyte admin") + cmdFlags.BoolVar(&DefaultConfig.Insecure, fmt.Sprintf("%v%v", prefix, "insecure"), DefaultConfig.Insecure, "Enable insecure mode") + cmdFlags.BoolVar(&DefaultConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultConfig.Force, "Force to overwrite the default config file without confirmation") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/config/config_flags_test.go b/flytectl/cmd/config/subcommand/config/config_flags_test.go new file mode 100755 index 0000000000..51f52abb86 --- /dev/null +++ b/flytectl/cmd/config/subcommand/config/config_flags_test.go @@ -0,0 +1,158 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package config + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := 
actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_console", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("console", testValue) + if vString, err := cmdFlags.GetString("console"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Console) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_host", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("host", testValue) + if vString, err := cmdFlags.GetString("host"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Host) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_insecure", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("insecure", testValue) + if vBool, err := cmdFlags.GetBool("insecure"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Insecure) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/config/console_flags.go b/flytectl/cmd/config/subcommand/config/console_flags.go new file mode 100644 index 0000000000..168aca95c2 --- /dev/null +++ b/flytectl/cmd/config/subcommand/config/console_flags.go @@ -0,0 +1,20 @@ +package config + +import "github.com/flyteorg/flyte/flytestdlib/config" + +//go:generate pflags ConsoleConfig --default-var DefaultConsoleConfig --bind-default-var + +var ( + DefaultConsoleConfig = &ConsoleConfig{} + + cfg = config.MustRegisterSection("console", DefaultConsoleConfig) +) + +// FilesConfig containing 
flags used for registration +type ConsoleConfig struct { + Endpoint string `json:"endpoint" pflag:",Endpoint of console, if different than flyte admin"` +} + +func GetConfig() *ConsoleConfig { + return cfg.GetConfig().(*ConsoleConfig) +} diff --git a/flytectl/cmd/config/subcommand/config/consoleconfig_flags.go b/flytectl/cmd/config/subcommand/config/consoleconfig_flags.go new file mode 100755 index 0000000000..4135f44009 --- /dev/null +++ b/flytectl/cmd/config/subcommand/config/consoleconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package config + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (ConsoleConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (ConsoleConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (ConsoleConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in ConsoleConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg ConsoleConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("ConsoleConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConsoleConfig.Endpoint, fmt.Sprintf("%v%v", prefix, "endpoint"), DefaultConsoleConfig.Endpoint, "Endpoint of console, if different than flyte admin") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/config/consoleconfig_flags_test.go b/flytectl/cmd/config/subcommand/config/consoleconfig_flags_test.go new file mode 100755 index 0000000000..77cf7ad56c --- /dev/null +++ b/flytectl/cmd/config/subcommand/config/consoleconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package config + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConsoleConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConsoleConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConsoleConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConsoleConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConsoleConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_ConsoleConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConsoleConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_ConsoleConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_ConsoleConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_ConsoleConfig(val, result)) +} + +func testDecodeRaw_ConsoleConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_ConsoleConfig(vStringSlice, result)) +} + +func TestConsoleConfig_GetPFlagSet(t *testing.T) { + val := ConsoleConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConsoleConfig_SetFlags(t *testing.T) { + actual := ConsoleConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_endpoint", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("endpoint", testValue) + if vString, err := cmdFlags.GetString("endpoint"); err == nil { + testDecodeJson_ConsoleConfig(t, fmt.Sprintf("%v", vString), &actual.Endpoint) + + } else { + 
assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/config/init_flags.go b/flytectl/cmd/config/subcommand/config/init_flags.go new file mode 100755 index 0000000000..cbb8e60be5 --- /dev/null +++ b/flytectl/cmd/config/subcommand/config/init_flags.go @@ -0,0 +1,17 @@ +package config + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{ + Insecure: false, + Force: false, + } +) + +// Configs +type Config struct { + Console string `json:"console" pflag:",Endpoint of console, if different than flyte admin"` + Host string `json:"host" pflag:",Endpoint of flyte admin"` + Insecure bool `json:"insecure" pflag:",Enable insecure mode"` + Force bool `json:"force" pflag:",Force to overwrite the default config file without confirmation"` +} diff --git a/flytectl/cmd/config/subcommand/docker/config_flags.go b/flytectl/cmd/config/subcommand/docker/config_flags.go new file mode 100644 index 0000000000..6c2a9bcea5 --- /dev/null +++ b/flytectl/cmd/config/subcommand/docker/config_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package docker + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.BoolVar(&DefaultConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultConfig.Force, "Optional. Forcefully delete existing sandbox cluster if it exists.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/docker/config_flags_test.go b/flytectl/cmd/config/subcommand/docker/config_flags_test.go new file mode 100644 index 0000000000..e1efe4a644 --- /dev/null +++ b/flytectl/cmd/config/subcommand/docker/config_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package docker + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. 
+func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + 
assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/docker/docker_config.go b/flytectl/cmd/config/subcommand/docker/docker_config.go new file mode 100644 index 0000000000..17d8c78157 --- /dev/null +++ b/flytectl/cmd/config/subcommand/docker/docker_config.go @@ -0,0 +1,13 @@ +package docker + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{ + Force: false, + } +) + +// Configs +type Config struct { + Force bool `json:"force" pflag:",Optional. Forcefully delete existing sandbox cluster if it exists."` +} diff --git a/flytectl/cmd/config/subcommand/execution/config_flags.go b/flytectl/cmd/config/subcommand/execution/config_flags.go new file mode 100755 index 0000000000..9f47f721f9 --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/config_flags.go @@ -0,0 +1,61 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package execution + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.Filter.FieldSelector, fmt.Sprintf("%v%v", prefix, "filter.fieldSelector"), DefaultConfig.Filter.FieldSelector, "Specifies the Field selector") + cmdFlags.StringVar(&DefaultConfig.Filter.SortBy, fmt.Sprintf("%v%v", prefix, "filter.sortBy"), DefaultConfig.Filter.SortBy, "Specifies which field to sort results ") + cmdFlags.Int32Var(&DefaultConfig.Filter.Limit, fmt.Sprintf("%v%v", prefix, "filter.limit"), DefaultConfig.Filter.Limit, "Specifies the limit") + cmdFlags.BoolVar(&DefaultConfig.Filter.Asc, fmt.Sprintf("%v%v", prefix, "filter.asc"), DefaultConfig.Filter.Asc, "Specifies the sorting order. By default flytectl sort result in descending order") + cmdFlags.Int32Var(&DefaultConfig.Filter.Page, fmt.Sprintf("%v%v", prefix, "filter.page"), DefaultConfig.Filter.Page, "Specifies the page number, in case there are multiple pages of results") + cmdFlags.BoolVar(&DefaultConfig.Details, fmt.Sprintf("%v%v", prefix, "details"), DefaultConfig.Details, "gets node execution details. 
Only applicable for single execution name i.e get execution name --details") + cmdFlags.StringVar(&DefaultConfig.NodeID, fmt.Sprintf("%v%v", prefix, "nodeID"), DefaultConfig.NodeID, "get task executions for given node name.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/execution/config_flags_test.go b/flytectl/cmd/config/subcommand/execution/config_flags_test.go new file mode 100755 index 0000000000..ea72babe75 --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/config_flags_test.go @@ -0,0 +1,200 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package execution + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_filter.fieldSelector", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.fieldSelector", testValue) + if vString, err := cmdFlags.GetString("filter.fieldSelector"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.FieldSelector) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.sortBy", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.sortBy", testValue) + if vString, err := 
cmdFlags.GetString("filter.sortBy"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.SortBy) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.limit", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.limit", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.limit"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Limit) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.asc", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.asc", testValue) + if vBool, err := cmdFlags.GetBool("filter.asc"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Filter.Asc) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.page", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.page", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.page"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Page) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_details", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("details", testValue) + if vBool, err := cmdFlags.GetBool("details"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Details) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_nodeID", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("nodeID", testValue) + if vString, err := cmdFlags.GetString("nodeID"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.NodeID) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git 
a/flytectl/cmd/config/subcommand/execution/delete_config.go b/flytectl/cmd/config/subcommand/execution/delete_config.go new file mode 100644 index 0000000000..f5581f2bdf --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/delete_config.go @@ -0,0 +1,10 @@ +package execution + +//go:generate pflags ExecDeleteConfig --default-var DefaultExecDeleteConfig --bind-default-var + +var DefaultExecDeleteConfig = &ExecDeleteConfig{} + +// ExecutionDeleteConfig stores the flags required by delete execution +type ExecDeleteConfig struct { + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` +} diff --git a/flytectl/cmd/config/subcommand/execution/execdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/execution/execdeleteconfig_flags.go new file mode 100755 index 0000000000..b882734811 --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/execdeleteconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package execution + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (ExecDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (ExecDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (ExecDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in ExecDeleteConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg ExecDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("ExecDeleteConfig", pflag.ExitOnError) + cmdFlags.BoolVar(&DefaultExecDeleteConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultExecDeleteConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/execution/execdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/execution/execdeleteconfig_flags_test.go new file mode 100755 index 0000000000..c4d58d13f8 --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/execdeleteconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package execution + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsExecDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. 
+func canGetElementExecDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsExecDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookExecDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementExecDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_ExecDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookExecDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_ExecDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_ExecDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_ExecDeleteConfig(val, 
result)) +} + +func testDecodeRaw_ExecDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_ExecDeleteConfig(vStringSlice, result)) +} + +func TestExecDeleteConfig_GetPFlagSet(t *testing.T) { + val := ExecDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestExecDeleteConfig_SetFlags(t *testing.T) { + actual := ExecDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_ExecDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/execution/execution_config.go b/flytectl/cmd/config/subcommand/execution/execution_config.go new file mode 100644 index 0000000000..e7a203ea2b --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/execution_config.go @@ -0,0 +1,19 @@ +package execution + +import ( + "github.com/flyteorg/flytectl/pkg/filters" +) + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{ + Filter: filters.DefaultFilter, + } +) + +// Config stores the flags required by get execution +type Config struct { + Filter filters.Filters `json:"filter" pflag:","` + Details bool `json:"details" pflag:",gets node execution details. 
Only applicable for single execution name i.e get execution name --details"` + NodeID string `json:"nodeID" pflag:",get task executions for given node name."` +} diff --git a/flytectl/cmd/config/subcommand/execution/update_config.go b/flytectl/cmd/config/subcommand/execution/update_config.go new file mode 100644 index 0000000000..7d55a5e9b0 --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/update_config.go @@ -0,0 +1,14 @@ +package execution + +//go:generate pflags UpdateConfig --default-var UConfig --bind-default-var +var ( + UConfig = &UpdateConfig{} +) + +// UpdateConfig +type UpdateConfig struct { + Archive bool `json:"archive" pflag:",archive execution."` + Activate bool `json:"activate" pflag:",activate execution."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} diff --git a/flytectl/cmd/config/subcommand/execution/updateconfig_flags.go b/flytectl/cmd/config/subcommand/execution/updateconfig_flags.go new file mode 100755 index 0000000000..a1b251c18d --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/updateconfig_flags.go @@ -0,0 +1,58 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package execution + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (UpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (UpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (UpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in UpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg UpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("UpdateConfig", pflag.ExitOnError) + cmdFlags.BoolVar(&UConfig.Archive, fmt.Sprintf("%v%v", prefix, "archive"), UConfig.Archive, "archive execution.") + cmdFlags.BoolVar(&UConfig.Activate, fmt.Sprintf("%v%v", prefix, "activate"), UConfig.Activate, "activate execution.") + cmdFlags.BoolVar(&UConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), UConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&UConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), UConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/execution/updateconfig_flags_test.go b/flytectl/cmd/config/subcommand/execution/updateconfig_flags_test.go new file mode 100755 index 0000000000..2e6c693ea8 --- /dev/null +++ b/flytectl/cmd/config/subcommand/execution/updateconfig_flags_test.go @@ -0,0 +1,158 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package execution + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_UpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_UpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_UpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_UpdateConfig(val, result)) +} + +func testDecodeRaw_UpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_UpdateConfig(vStringSlice, result)) +} + +func TestUpdateConfig_GetPFlagSet(t *testing.T) { + val := UpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestUpdateConfig_SetFlags(t *testing.T) { + actual := UpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_archive", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("archive", testValue) + if vBool, err := cmdFlags.GetBool("archive"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Archive) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_activate", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("activate", testValue) + if vBool, err := 
cmdFlags.GetBool("activate"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Activate) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/attrdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/executionclusterlabel/attrdeleteconfig_flags.go new file mode 100755 index 0000000000..593582469a --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/attrdeleteconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionclusterlabel + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrDeleteConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrDeleteConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultDelConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultDelConfig.AttrFile, "attribute file name to be used for delete attribute for the resource type.") + cmdFlags.BoolVar(&DefaultDelConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultDelConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/attrdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/executionclusterlabel/attrdeleteconfig_flags_test.go new file mode 100755 index 0000000000..573e5937b3 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/attrdeleteconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package executionclusterlabel + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(val, result)) +} + +func testDecodeRaw_AttrDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(vStringSlice, result)) +} + +func TestAttrDeleteConfig_GetPFlagSet(t *testing.T) { + val := AttrDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrDeleteConfig_SetFlags(t *testing.T) { + actual := AttrDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", 
testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/attrfetchconfig_flags.go b/flytectl/cmd/config/subcommand/executionclusterlabel/attrfetchconfig_flags.go new file mode 100755 index 0000000000..97bf9e8ff1 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/attrfetchconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionclusterlabel + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrFetchConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrFetchConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrFetchConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrFetchConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrFetchConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrFetchConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFetchConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultFetchConfig.AttrFile, "attribute file name to be used for generating attribute for the resource type.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/attrfetchconfig_flags_test.go b/flytectl/cmd/config/subcommand/executionclusterlabel/attrfetchconfig_flags_test.go new file mode 100755 index 0000000000..495e5024d6 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/attrfetchconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionclusterlabel + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrFetchConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrFetchConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrFetchConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrFetchConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrFetchConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. 
Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrFetchConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrFetchConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrFetchConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrFetchConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(val, result)) +} + +func testDecodeRaw_AttrFetchConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(vStringSlice, result)) +} + +func TestAttrFetchConfig_GetPFlagSet(t *testing.T) { + val := AttrFetchConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrFetchConfig_SetFlags(t *testing.T) { + actual := AttrFetchConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vString), 
&actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/attrupdateconfig_flags.go b/flytectl/cmd/config/subcommand/executionclusterlabel/attrupdateconfig_flags.go new file mode 100755 index 0000000000..979e08ea50 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/attrupdateconfig_flags.go @@ -0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionclusterlabel + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrUpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrUpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrUpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrUpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrUpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrUpdateConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultUpdateConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultUpdateConfig.AttrFile, "attribute file name to be used for updating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultUpdateConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultUpdateConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultUpdateConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultUpdateConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/attrupdateconfig_flags_test.go b/flytectl/cmd/config/subcommand/executionclusterlabel/attrupdateconfig_flags_test.go new file mode 100755 index 0000000000..e2a8fe2a2e --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/attrupdateconfig_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionclusterlabel + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrUpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrUpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrUpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(val, result)) +} + +func testDecodeRaw_AttrUpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(vStringSlice, result)) +} + +func TestAttrUpdateConfig_GetPFlagSet(t *testing.T) { + val := AttrUpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrUpdateConfig_SetFlags(t *testing.T) { + actual := AttrUpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/delete_config.go b/flytectl/cmd/config/subcommand/executionclusterlabel/delete_config.go new file mode 100644 index 0000000000..b0388ed0d5 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/delete_config.go @@ -0,0 +1,11 @@ +package executionclusterlabel + +//go:generate pflags AttrDeleteConfig --default-var DefaultDelConfig --bind-default-var + +// AttrDeleteConfig Matchable resource attributes configuration passed from command line +type AttrDeleteConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for delete attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` +} + 
+var DefaultDelConfig = &AttrDeleteConfig{} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/fetch_config.go b/flytectl/cmd/config/subcommand/executionclusterlabel/fetch_config.go new file mode 100644 index 0000000000..68a27ea6fb --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/fetch_config.go @@ -0,0 +1,9 @@ +package executionclusterlabel + +//go:generate pflags AttrFetchConfig --default-var DefaultFetchConfig --bind-default-var + +type AttrFetchConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for generating attribute for the resource type."` +} + +var DefaultFetchConfig = &AttrFetchConfig{} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/file_config.go b/flytectl/cmd/config/subcommand/executionclusterlabel/file_config.go new file mode 100644 index 0000000000..39dfc2b570 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/file_config.go @@ -0,0 +1,47 @@ +package executionclusterlabel + +import ( + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +// FileConfig shadow Config for ExecutionClusterLabel. +// The shadow Config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. +// As the same structure is being used for both ProjectDomainAttribute/Workflowattribute +type FileConfig struct { + Project string `json:"project"` + Domain string `json:"domain"` + Workflow string `json:"workflow,omitempty"` + *admin.ExecutionClusterLabel +} + +// Decorate decorator over ExecutionClusterLabel. +func (t FileConfig) Decorate() *admin.MatchingAttributes { + return &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: t.ExecutionClusterLabel, + }, + } +} + +// UnDecorate to uncover ExecutionClusterLabel. 
+func (t *FileConfig) UnDecorate(matchingAttribute *admin.MatchingAttributes) { + if matchingAttribute == nil { + return + } + t.ExecutionClusterLabel = matchingAttribute.GetExecutionClusterLabel() +} + +// GetProject from the FileConfig +func (t FileConfig) GetProject() string { + return t.Project +} + +// GetDomain from the FileConfig +func (t FileConfig) GetDomain() string { + return t.Domain +} + +// GetWorkflow from the FileConfig +func (t FileConfig) GetWorkflow() string { + return t.Workflow +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/file_config_test.go b/flytectl/cmd/config/subcommand/executionclusterlabel/file_config_test.go new file mode 100644 index 0000000000..79ea059930 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/file_config_test.go @@ -0,0 +1,46 @@ +package executionclusterlabel + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" +) + +func TestFileConfig(t *testing.T) { + execClusterLabelFileConfig := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + ExecutionClusterLabel: &admin.ExecutionClusterLabel{ + Value: "foo", + }, + } + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: execClusterLabelFileConfig.ExecutionClusterLabel, + }, + } + t.Run("decorate", func(t *testing.T) { + assert.Equal(t, matchingAttr, execClusterLabelFileConfig.Decorate()) + }) + + t.Run("decorate", func(t *testing.T) { + taskAttrFileConfigNew := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + } + taskAttrFileConfigNew.UnDecorate(matchingAttr) + assert.Equal(t, execClusterLabelFileConfig, taskAttrFileConfigNew) + }) + t.Run("get project domain workflow", func(t *testing.T) { + taskAttrFileConfigNew := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + Workflow: "workflow", + } + assert.Equal(t, "dummyProject", 
taskAttrFileConfigNew.GetProject()) + assert.Equal(t, "dummyDomain", taskAttrFileConfigNew.GetDomain()) + assert.Equal(t, "workflow", taskAttrFileConfigNew.GetWorkflow()) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionclusterlabel/update_config.go b/flytectl/cmd/config/subcommand/executionclusterlabel/update_config.go new file mode 100644 index 0000000000..be0de45cef --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionclusterlabel/update_config.go @@ -0,0 +1,12 @@ +package executionclusterlabel + +//go:generate pflags AttrUpdateConfig --default-var DefaultUpdateConfig --bind-default-var + +// AttrUpdateConfig Matchable resource attributes configuration passed from command line +type AttrUpdateConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for updating attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} + +var DefaultUpdateConfig = &AttrUpdateConfig{} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/attrdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/executionqueueattribute/attrdeleteconfig_flags.go new file mode 100755 index 0000000000..a174e908c0 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/attrdeleteconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionqueueattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrDeleteConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrDeleteConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultDelConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultDelConfig.AttrFile, "attribute file name to be used for delete attribute for the resource type.") + cmdFlags.BoolVar(&DefaultDelConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultDelConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/attrdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/executionqueueattribute/attrdeleteconfig_flags_test.go new file mode 100755 index 0000000000..a2ca3c8004 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/attrdeleteconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package executionqueueattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(val, result)) +} + +func testDecodeRaw_AttrDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(vStringSlice, result)) +} + +func TestAttrDeleteConfig_GetPFlagSet(t *testing.T) { + val := AttrDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrDeleteConfig_SetFlags(t *testing.T) { + actual := AttrDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", 
testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/attrfetchconfig_flags.go b/flytectl/cmd/config/subcommand/executionqueueattribute/attrfetchconfig_flags.go new file mode 100755 index 0000000000..05a277f93a --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/attrfetchconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionqueueattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrFetchConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrFetchConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrFetchConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrFetchConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrFetchConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrFetchConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFetchConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultFetchConfig.AttrFile, "attribute file name to be used for generating attribute for the resource type.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/attrfetchconfig_flags_test.go b/flytectl/cmd/config/subcommand/executionqueueattribute/attrfetchconfig_flags_test.go new file mode 100755 index 0000000000..73e37ab447 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/attrfetchconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionqueueattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrFetchConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrFetchConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrFetchConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrFetchConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrFetchConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrFetchConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrFetchConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrFetchConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrFetchConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(val, result)) +} + +func testDecodeRaw_AttrFetchConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(vStringSlice, result)) +} + +func TestAttrFetchConfig_GetPFlagSet(t *testing.T) { + val := AttrFetchConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + 
+func TestAttrFetchConfig_SetFlags(t *testing.T) { + actual := AttrFetchConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/attrupdateconfig_flags.go b/flytectl/cmd/config/subcommand/executionqueueattribute/attrupdateconfig_flags.go new file mode 100755 index 0000000000..7643a98017 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/attrupdateconfig_flags.go @@ -0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionqueueattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrUpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrUpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrUpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrUpdateConfig and its nested types. 
The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrUpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrUpdateConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultUpdateConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultUpdateConfig.AttrFile, "attribute file name to be used for updating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultUpdateConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultUpdateConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultUpdateConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultUpdateConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/attrupdateconfig_flags_test.go b/flytectl/cmd/config/subcommand/executionqueueattribute/attrupdateconfig_flags_test.go new file mode 100755 index 0000000000..82c697d17a --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/attrupdateconfig_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package executionqueueattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrUpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrUpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrUpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(val, result)) +} + +func testDecodeRaw_AttrUpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(vStringSlice, result)) +} + +func TestAttrUpdateConfig_GetPFlagSet(t *testing.T) { + val := AttrUpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrUpdateConfig_SetFlags(t *testing.T) { + actual := AttrUpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/delete_config.go b/flytectl/cmd/config/subcommand/executionqueueattribute/delete_config.go new file mode 100644 index 0000000000..c66a5d72e8 --- /dev/null +++ b/flytectl/cmd/config/subcommand/executionqueueattribute/delete_config.go @@ -0,0 +1,11 @@ +package executionqueueattribute + +//go:generate pflags AttrDeleteConfig --default-var DefaultDelConfig --bind-default-var + +// AttrDeleteConfig Matchable resource attributes configuration passed from command line +type AttrDeleteConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for delete attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any 
modifications."`
+}
+
+var DefaultDelConfig = &AttrDeleteConfig{}
diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/fetch_config.go b/flytectl/cmd/config/subcommand/executionqueueattribute/fetch_config.go
new file mode 100644
index 0000000000..62a76d6ba1
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/executionqueueattribute/fetch_config.go
@@ -0,0 +1,10 @@
+package executionqueueattribute
+
+//go:generate pflags AttrFetchConfig --default-var DefaultFetchConfig --bind-default-var
+
+// AttrFetchConfig Matchable resource attributes configuration passed from command line for fetch.
+type AttrFetchConfig struct {
+	AttrFile string `json:"attrFile" pflag:",attribute file name to be used for generating attribute for the resource type."`
+}
+
+var DefaultFetchConfig = &AttrFetchConfig{}
diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/file_config.go b/flytectl/cmd/config/subcommand/executionqueueattribute/file_config.go
new file mode 100644
index 0000000000..cd538da67b
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/executionqueueattribute/file_config.go
@@ -0,0 +1,47 @@
+package executionqueueattribute
+
+import (
+	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"
+)
+
+// AttrFileConfig shadow Config for ExecutionQueueAttributes.
+// The shadow Config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs.
+// As the same structure is being used for both ProjectDomainAttribute/Workflowattribute
+type AttrFileConfig struct {
+	Project  string `json:"project"`
+	Domain   string `json:"domain"`
+	Workflow string `json:"workflow,omitempty"`
+	*admin.ExecutionQueueAttributes
+}
+
+// Decorate decorator over ExecutionQueueAttributes.
+func (a AttrFileConfig) Decorate() *admin.MatchingAttributes {
+	return &admin.MatchingAttributes{
+		Target: &admin.MatchingAttributes_ExecutionQueueAttributes{
+			ExecutionQueueAttributes: a.ExecutionQueueAttributes,
+		},
+	}
+}
+
+// UnDecorate to uncover ExecutionQueueAttributes.
+func (a *AttrFileConfig) UnDecorate(matchingAttribute *admin.MatchingAttributes) {
+	if matchingAttribute == nil {
+		return
+	}
+	a.ExecutionQueueAttributes = matchingAttribute.GetExecutionQueueAttributes()
+}
+
+// GetProject from the AttrFileConfig
+func (a AttrFileConfig) GetProject() string {
+	return a.Project
+}
+
+// GetDomain from the AttrFileConfig
+func (a AttrFileConfig) GetDomain() string {
+	return a.Domain
+}
+
+// GetWorkflow from the AttrFileConfig
+func (a AttrFileConfig) GetWorkflow() string {
+	return a.Workflow
+}
diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/file_config_test.go b/flytectl/cmd/config/subcommand/executionqueueattribute/file_config_test.go
new file mode 100644
index 0000000000..191cabb023
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/executionqueueattribute/file_config_test.go
@@ -0,0 +1,47 @@
+package executionqueueattribute
+
+import (
+	"testing"
+
+	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestFileConfig(t *testing.T) {
+	executionQueueAttrFileConfig := AttrFileConfig{
+		Project: "dummyProject",
+		Domain:  "dummyDomain",
+		ExecutionQueueAttributes: &admin.ExecutionQueueAttributes{
+			Tags: []string{"foo", "bar"},
+		},
+	}
+	matchingAttr := &admin.MatchingAttributes{
+		Target: &admin.MatchingAttributes_ExecutionQueueAttributes{
+			ExecutionQueueAttributes: executionQueueAttrFileConfig.ExecutionQueueAttributes,
+		},
+	}
+	t.Run("decorate", func(t *testing.T) {
+		assert.Equal(t, matchingAttr, executionQueueAttrFileConfig.Decorate())
+	})
+
+	// NOTE(review): this subtest was also named "decorate", shadowing the one above.
+	t.Run("undecorate", func(t *testing.T) {
+		executionAttrFileConfigNew := AttrFileConfig{
+			Project: "dummyProject",
+			Domain:  "dummyDomain",
+		}
+		executionAttrFileConfigNew.UnDecorate(matchingAttr)
+		assert.Equal(t, executionQueueAttrFileConfig, executionAttrFileConfigNew)
+	})
+	t.Run("get project domain workflow", func(t *testing.T) {
+		executionQueueAttrFileConfigNew := AttrFileConfig{
+			Project:  "dummyProject",
+			Domain:   "dummyDomain",
+			Workflow: "workflow",
+		}
+		assert.Equal(t, "dummyProject", executionQueueAttrFileConfigNew.GetProject())
+		assert.Equal(t, "dummyDomain", executionQueueAttrFileConfigNew.GetDomain())
+		assert.Equal(t, "workflow", executionQueueAttrFileConfigNew.GetWorkflow())
+	})
+}
diff --git a/flytectl/cmd/config/subcommand/executionqueueattribute/update_config.go b/flytectl/cmd/config/subcommand/executionqueueattribute/update_config.go
new file mode 100644
index 0000000000..65dd680d5a
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/executionqueueattribute/update_config.go
@@ -0,0 +1,12 @@
+package executionqueueattribute
+
+//go:generate pflags AttrUpdateConfig --default-var DefaultUpdateConfig --bind-default-var
+
+// AttrUpdateConfig Matchable resource attributes configuration passed from command line
+type AttrUpdateConfig struct {
+	AttrFile string `json:"attrFile" pflag:",attribute file name to be used for updating attribute for the resource type."`
+	DryRun   bool   `json:"dryRun" pflag:",execute command without making any modifications."`
+	Force    bool   `json:"force" pflag:",do not ask for an acknowledgement during updates."`
+}
+
+var DefaultUpdateConfig = &AttrUpdateConfig{}
diff --git a/flytectl/cmd/config/subcommand/launchplan/config_flags.go b/flytectl/cmd/config/subcommand/launchplan/config_flags.go
new file mode 100755
index 0000000000..dfac4c53ea
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/launchplan/config_flags.go
@@ -0,0 +1,63 @@
+// Code generated by go generate; DO NOT EDIT.
+// This file was generated by robots.
+
+package launchplan
+
+import (
+	"encoding/json"
+	"reflect"
+
+	"fmt"
+
+	"github.com/spf13/pflag"
+)
+
+// If v is a pointer, it will get its element value or the zero value of the element type.
+// If v is not a pointer, it will return it as is.
+func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.ExecFile, fmt.Sprintf("%v%v", prefix, "execFile"), DefaultConfig.ExecFile, "execution file name to be used for generating execution spec of a single launchplan.") + cmdFlags.StringVar(&DefaultConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), DefaultConfig.Version, "version of the launchplan to be fetched.") + cmdFlags.BoolVar(&DefaultConfig.Latest, fmt.Sprintf("%v%v", prefix, "latest"), DefaultConfig.Latest, " flag to indicate to fetch the latest version, version flag will be ignored in this case") + cmdFlags.StringVar(&DefaultConfig.Filter.FieldSelector, fmt.Sprintf("%v%v", prefix, "filter.fieldSelector"), DefaultConfig.Filter.FieldSelector, "Specifies the Field selector") + cmdFlags.StringVar(&DefaultConfig.Filter.SortBy, fmt.Sprintf("%v%v", prefix, "filter.sortBy"), DefaultConfig.Filter.SortBy, "Specifies which field to sort results ") + cmdFlags.Int32Var(&DefaultConfig.Filter.Limit, fmt.Sprintf("%v%v", prefix, "filter.limit"), DefaultConfig.Filter.Limit, "Specifies the limit") + cmdFlags.BoolVar(&DefaultConfig.Filter.Asc, 
fmt.Sprintf("%v%v", prefix, "filter.asc"), DefaultConfig.Filter.Asc, "Specifies the sorting order. By default flytectl sort result in descending order") + cmdFlags.Int32Var(&DefaultConfig.Filter.Page, fmt.Sprintf("%v%v", prefix, "filter.page"), DefaultConfig.Filter.Page, "Specifies the page number, in case there are multiple pages of results") + cmdFlags.StringVar(&DefaultConfig.Workflow, fmt.Sprintf("%v%v", prefix, "workflow"), DefaultConfig.Workflow, "name of the workflow for which the launchplans need to be fetched.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/launchplan/config_flags_test.go b/flytectl/cmd/config/subcommand/launchplan/config_flags_test.go new file mode 100755 index 0000000000..b46c8d104f --- /dev/null +++ b/flytectl/cmd/config/subcommand/launchplan/config_flags_test.go @@ -0,0 +1,228 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package launchplan + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := 
actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_execFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("execFile", testValue) + if vString, err := cmdFlags.GetString("execFile"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.ExecFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_latest", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("latest", testValue) + if vBool, err := cmdFlags.GetBool("latest"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Latest) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.fieldSelector", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.fieldSelector", testValue) + if vString, err := cmdFlags.GetString("filter.fieldSelector"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.FieldSelector) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.sortBy", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.sortBy", testValue) + if vString, err := cmdFlags.GetString("filter.sortBy"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.SortBy) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.limit", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.limit", 
testValue) + if vInt32, err := cmdFlags.GetInt32("filter.limit"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Limit) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.asc", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.asc", testValue) + if vBool, err := cmdFlags.GetBool("filter.asc"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Filter.Asc) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.page", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.page", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.page"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Page) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_workflow", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("workflow", testValue) + if vString, err := cmdFlags.GetString("workflow"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Workflow) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/launchplan/launchplan_config.go b/flytectl/cmd/config/subcommand/launchplan/launchplan_config.go new file mode 100644 index 0000000000..1a10764384 --- /dev/null +++ b/flytectl/cmd/config/subcommand/launchplan/launchplan_config.go @@ -0,0 +1,21 @@ +package launchplan + +import ( + "github.com/flyteorg/flytectl/pkg/filters" +) + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{ + Filter: filters.DefaultFilter, + } +) + +// Config +type Config struct { + ExecFile string `json:"execFile" pflag:",execution file name to be used for generating execution spec of a single launchplan."` + Version string `json:"version" 
pflag:",version of the launchplan to be fetched."`
+	Latest   bool            `json:"latest" pflag:", flag to indicate to fetch the latest version, version flag will be ignored in this case"`
+	Filter   filters.Filters `json:"filter" pflag:","`
+	Workflow string          `json:"workflow" pflag:",name of the workflow for which the launchplans need to be fetched."`
+}
diff --git a/flytectl/cmd/config/subcommand/launchplan/updateconfig.go b/flytectl/cmd/config/subcommand/launchplan/updateconfig.go
new file mode 100644
index 0000000000..5d3b113dac
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/launchplan/updateconfig.go
@@ -0,0 +1,16 @@
+package launchplan
+
+//go:generate pflags UpdateConfig --default-var UConfig --bind-default-var
+var (
+	UConfig = &UpdateConfig{}
+)
+
+// UpdateConfig launch plan update configuration passed from command line
+type UpdateConfig struct {
+	Activate   bool   `json:"activate" pflag:",activate launchplan."`
+	Archive    bool   `json:"archive" pflag:",(Deprecated) disable the launch plan schedule (if it has an active schedule associated with it)."`
+	Deactivate bool   `json:"deactivate" pflag:",disable the launch plan schedule (if it has an active schedule associated with it)."`
+	DryRun     bool   `json:"dryRun" pflag:",execute command without making any modifications."`
+	Force      bool   `json:"force" pflag:",do not ask for an acknowledgement during updates."`
+	Version    string `json:"version" pflag:",version of the launchplan to be fetched."`
+}
diff --git a/flytectl/cmd/config/subcommand/launchplan/updateconfig_flags.go b/flytectl/cmd/config/subcommand/launchplan/updateconfig_flags.go
new file mode 100644
index 0000000000..b71224e72b
--- /dev/null
+++ b/flytectl/cmd/config/subcommand/launchplan/updateconfig_flags.go
@@ -0,0 +1,60 @@
+// Code generated by go generate; DO NOT EDIT.
+// This file was generated by robots.
+
+package launchplan
+
+import (
+	"encoding/json"
+	"reflect"
+
+	"fmt"
+
+	"github.com/spf13/pflag"
+)
+
+// If v is a pointer, it will get its element value or the zero value of the element type.
+// If v is not a pointer, it will return it as is. +func (UpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (UpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (UpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in UpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg UpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("UpdateConfig", pflag.ExitOnError) + cmdFlags.BoolVar(&UConfig.Activate, fmt.Sprintf("%v%v", prefix, "activate"), UConfig.Activate, "activate launchplan.") + cmdFlags.BoolVar(&UConfig.Archive, fmt.Sprintf("%v%v", prefix, "archive"), UConfig.Archive, "(Deprecated) disable the launch plan schedule (if it has an active schedule associated with it).") + cmdFlags.BoolVar(&UConfig.Deactivate, fmt.Sprintf("%v%v", prefix, "deactivate"), UConfig.Deactivate, "disable the launch plan schedule (if it has an active schedule associated with it).") + cmdFlags.BoolVar(&UConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), UConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&UConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), UConfig.Force, "do not ask for an acknowledgement during updates.") + cmdFlags.StringVar(&UConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), UConfig.Version, "version of the launchplan to be fetched.") + return cmdFlags +} diff --git 
a/flytectl/cmd/config/subcommand/launchplan/updateconfig_flags_test.go b/flytectl/cmd/config/subcommand/launchplan/updateconfig_flags_test.go new file mode 100755 index 0000000000..fc58e7ac8f --- /dev/null +++ b/flytectl/cmd/config/subcommand/launchplan/updateconfig_flags_test.go @@ -0,0 +1,186 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package launchplan + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_UpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_UpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_UpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_UpdateConfig(val, result)) +} + +func testDecodeRaw_UpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_UpdateConfig(vStringSlice, result)) +} + +func TestUpdateConfig_GetPFlagSet(t *testing.T) { + val := UpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestUpdateConfig_SetFlags(t *testing.T) { + actual := UpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_activate", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("activate", testValue) + if vBool, err := cmdFlags.GetBool("activate"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Activate) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_archive", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("archive", testValue) + if vBool, err := 
cmdFlags.GetBool("archive"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Archive) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_deactivate", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("deactivate", testValue) + if vBool, err := cmdFlags.GetBool("deactivate"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Deactivate) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_UpdateConfig(t, fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/matchable_attr_file_config_utils.go b/flytectl/cmd/config/subcommand/matchable_attr_file_config_utils.go new file mode 100644 index 0000000000..b21e7832a9 --- /dev/null +++ b/flytectl/cmd/config/subcommand/matchable_attr_file_config_utils.go @@ -0,0 +1,60 @@ +package subcommand + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + cmdUtil 
"github.com/flyteorg/flytectl/pkg/commandutils" + "sigs.k8s.io/yaml" +) + +// WriteConfigToFile used for marshaling the Config to a file which can then be used for update/delete +func WriteConfigToFile(matchableAttrConfig interface{}, fileName string) error { + d, err := yaml.Marshal(matchableAttrConfig) + if err != nil { + return fmt.Errorf("error: %v", err) + } + if _, err = os.Stat(fileName); err == nil { + if !cmdUtil.AskForConfirmation(fmt.Sprintf("warning file %v will be overwritten", fileName), os.Stdin) { + return fmt.Errorf("backup the file before continuing") + } + } + return ioutil.WriteFile(fileName, d, 0600) +} + +// String Dumps the json representation of the TaskResourceAttrFileConfig +func String(matchableAttrConfig interface{}) string { + tj, err := json.Marshal(matchableAttrConfig) + if err != nil { + fmt.Println(err) + return "marshaling error" + } + return fmt.Sprintf("%s\n", tj) +} + +// ReadConfigFromFile used for unmarshaling the Config from a file which is used for update/delete +func ReadConfigFromFile(matchableAttrConfig interface{}, fileName string) error { + data, err := ioutil.ReadFile(fileName) + if err != nil { + return fmt.Errorf("unable to read from %v yaml file", fileName) + } + if err = yaml.UnmarshalStrict(data, matchableAttrConfig); err != nil { + return err + } + return nil +} + +func DumpTaskResourceAttr(matchableAttrConfig interface{}, fileName string) error { + // Write Config to file if filename provided in the command + if len(fileName) > 0 { + if err := WriteConfigToFile(matchableAttrConfig, fileName); err != nil { + return fmt.Errorf("error dumping in file due to %v", err) + } + fmt.Printf("wrote the config to file %v", fileName) + } else { + fmt.Printf("%v", String(matchableAttrConfig)) + } + return nil +} diff --git a/flytectl/cmd/config/subcommand/matchable_attribute_decorator.go b/flytectl/cmd/config/subcommand/matchable_attribute_decorator.go new file mode 100644 index 0000000000..9e6aadfa67 --- /dev/null +++ 
b/flytectl/cmd/config/subcommand/matchable_attribute_decorator.go @@ -0,0 +1,13 @@ +package subcommand + +import "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + +// MatchableAttributeDecorator defines a decorator for any matchable attribute target. +type MatchableAttributeDecorator interface { + Decorate() *admin.MatchingAttributes +} + +// MatchableAttributeUnDecorator defines a undecorator to get the target. +type MatchableAttributeUnDecorator interface { + UnDecorate(*admin.MatchingAttributes) +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/attrdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/plugin_override/attrdeleteconfig_flags.go new file mode 100755 index 0000000000..f2bdb90859 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/attrdeleteconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package pluginoverride + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrDeleteConfig and its nested types. 
The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrDeleteConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultDelConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultDelConfig.AttrFile, "attribute file name to be used for delete attribute for the resource type.") + cmdFlags.BoolVar(&DefaultDelConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultDelConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/attrdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/plugin_override/attrdeleteconfig_flags_test.go new file mode 100755 index 0000000000..006cfa064b --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/attrdeleteconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package pluginoverride + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(val, result)) +} + +func testDecodeRaw_AttrDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(vStringSlice, result)) +} + +func TestAttrDeleteConfig_GetPFlagSet(t *testing.T) { + val := AttrDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrDeleteConfig_SetFlags(t *testing.T) { + actual := AttrDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/attrfetchconfig_flags.go b/flytectl/cmd/config/subcommand/plugin_override/attrfetchconfig_flags.go new file mode 100755 index 0000000000..e7c79a92b4 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/attrfetchconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package pluginoverride + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrFetchConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrFetchConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrFetchConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrFetchConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrFetchConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrFetchConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFetchConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultFetchConfig.AttrFile, "attribute file name to be used for generating attribute for the resource type.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/attrfetchconfig_flags_test.go b/flytectl/cmd/config/subcommand/plugin_override/attrfetchconfig_flags_test.go new file mode 100755 index 0000000000..d5206f1d33 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/attrfetchconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package pluginoverride + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrFetchConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrFetchConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrFetchConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrFetchConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrFetchConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrFetchConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrFetchConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrFetchConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrFetchConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(val, result)) +} + +func testDecodeRaw_AttrFetchConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(vStringSlice, result)) +} + +func TestAttrFetchConfig_GetPFlagSet(t *testing.T) { + val := AttrFetchConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrFetchConfig_SetFlags(t *testing.T) { + actual := AttrFetchConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/attrupdateconfig_flags.go 
b/flytectl/cmd/config/subcommand/plugin_override/attrupdateconfig_flags.go new file mode 100755 index 0000000000..82e5cb6661 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/attrupdateconfig_flags.go @@ -0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package pluginoverride + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrUpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrUpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrUpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrUpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrUpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrUpdateConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultUpdateConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultUpdateConfig.AttrFile, "attribute file name to be used for updating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultUpdateConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultUpdateConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultUpdateConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultUpdateConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/attrupdateconfig_flags_test.go b/flytectl/cmd/config/subcommand/plugin_override/attrupdateconfig_flags_test.go new file mode 100755 index 0000000000..309c31746a --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/attrupdateconfig_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package pluginoverride + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrUpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrUpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrUpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(val, result)) +} + +func testDecodeRaw_AttrUpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(vStringSlice, result)) +} + +func TestAttrUpdateConfig_GetPFlagSet(t *testing.T) { + val := AttrUpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrUpdateConfig_SetFlags(t *testing.T) { + actual := AttrUpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/delete_config.go b/flytectl/cmd/config/subcommand/plugin_override/delete_config.go new file mode 100644 index 0000000000..5ae0f08b9d --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/delete_config.go @@ -0,0 +1,11 @@ +package pluginoverride + +//go:generate pflags AttrDeleteConfig --default-var DefaultDelConfig --bind-default-var + +// AttrDeleteConfig Matchable resource attributes configuration passed from command line +type AttrDeleteConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for delete attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` +} + +var DefaultDelConfig = 
&AttrDeleteConfig{} diff --git a/flytectl/cmd/config/subcommand/plugin_override/fetch_config.go b/flytectl/cmd/config/subcommand/plugin_override/fetch_config.go new file mode 100644 index 0000000000..e2283e7c80 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/fetch_config.go @@ -0,0 +1,9 @@ +package pluginoverride + +//go:generate pflags AttrFetchConfig --default-var DefaultFetchConfig --bind-default-var + +type AttrFetchConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for generating attribute for the resource type."` +} + +var DefaultFetchConfig = &AttrFetchConfig{} diff --git a/flytectl/cmd/config/subcommand/plugin_override/file_config.go b/flytectl/cmd/config/subcommand/plugin_override/file_config.go new file mode 100644 index 0000000000..15349c83e7 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/file_config.go @@ -0,0 +1,47 @@ +package pluginoverride + +import ( + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +// FileConfig shadow Config for PluginOverrides. +// The shadow Config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. +// As the same structure is being used for both ProjectDomainAttribute/Workflowattribute +type FileConfig struct { + Project string `json:"project"` + Domain string `json:"domain"` + Workflow string `json:"workflow,omitempty"` + *admin.PluginOverrides +} + +// Decorate decorator over PluginOverrides. +func (t FileConfig) Decorate() *admin.MatchingAttributes { + return &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: t.PluginOverrides, + }, + } +} + +// UnDecorate to uncover PluginOverrides. 
+func (t *FileConfig) UnDecorate(matchingAttribute *admin.MatchingAttributes) { + if matchingAttribute == nil { + return + } + t.PluginOverrides = matchingAttribute.GetPluginOverrides() +} + +// GetProject from the FileConfig +func (t FileConfig) GetProject() string { + return t.Project +} + +// GetDomain from the FileConfig +func (t FileConfig) GetDomain() string { + return t.Domain +} + +// GetWorkflow from the FileConfig +func (t FileConfig) GetWorkflow() string { + return t.Workflow +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/file_config_test.go b/flytectl/cmd/config/subcommand/plugin_override/file_config_test.go new file mode 100644 index 0000000000..13037945af --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/file_config_test.go @@ -0,0 +1,58 @@ +package pluginoverride + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" +) + +func TestFileConfig(t *testing.T) { + pluginOverride1 := &admin.PluginOverride{ + TaskType: "python_task", + PluginId: []string{"plugin-override1", "plugin-override2"}, + MissingPluginBehavior: admin.PluginOverride_FAIL, + } + pluginOverride2 := &admin.PluginOverride{ + TaskType: "java_task", + PluginId: []string{"plugin-override3", "plugin-override3"}, + MissingPluginBehavior: admin.PluginOverride_USE_DEFAULT, + } + pluginOverrides := []*admin.PluginOverride{pluginOverride1, pluginOverride2} + + pluginOverrideFileConfig := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + PluginOverrides: &admin.PluginOverrides{ + Overrides: pluginOverrides, + }, + } + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: pluginOverrideFileConfig.PluginOverrides, + }, + } + t.Run("decorate", func(t *testing.T) { + assert.Equal(t, matchingAttr, pluginOverrideFileConfig.Decorate()) + }) + + t.Run("decorate", func(t *testing.T) { + taskAttrFileConfigNew := FileConfig{ 
+ Project: "dummyProject", + Domain: "dummyDomain", + } + taskAttrFileConfigNew.UnDecorate(matchingAttr) + assert.Equal(t, pluginOverrideFileConfig, taskAttrFileConfigNew) + }) + t.Run("get project domain workflow", func(t *testing.T) { + taskAttrFileConfigNew := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + Workflow: "workflow", + } + assert.Equal(t, "dummyProject", taskAttrFileConfigNew.GetProject()) + assert.Equal(t, "dummyDomain", taskAttrFileConfigNew.GetDomain()) + assert.Equal(t, "workflow", taskAttrFileConfigNew.GetWorkflow()) + }) +} diff --git a/flytectl/cmd/config/subcommand/plugin_override/update_config.go b/flytectl/cmd/config/subcommand/plugin_override/update_config.go new file mode 100644 index 0000000000..dc3c260074 --- /dev/null +++ b/flytectl/cmd/config/subcommand/plugin_override/update_config.go @@ -0,0 +1,12 @@ +package pluginoverride + +//go:generate pflags AttrUpdateConfig --default-var DefaultUpdateConfig --bind-default-var + +// AttrUpdateConfig Matchable resource attributes configuration passed from command line +type AttrUpdateConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for updating attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} + +var DefaultUpdateConfig = &AttrUpdateConfig{} diff --git a/flytectl/cmd/config/subcommand/project/config_flags.go b/flytectl/cmd/config/subcommand/project/config_flags.go new file mode 100755 index 0000000000..ec92b98635 --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/config_flags.go @@ -0,0 +1,59 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package project + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.Filter.FieldSelector, fmt.Sprintf("%v%v", prefix, "filter.fieldSelector"), DefaultConfig.Filter.FieldSelector, "Specifies the Field selector") + cmdFlags.StringVar(&DefaultConfig.Filter.SortBy, fmt.Sprintf("%v%v", prefix, "filter.sortBy"), DefaultConfig.Filter.SortBy, "Specifies which field to sort results ") + cmdFlags.Int32Var(&DefaultConfig.Filter.Limit, fmt.Sprintf("%v%v", prefix, "filter.limit"), DefaultConfig.Filter.Limit, "Specifies the limit") + cmdFlags.BoolVar(&DefaultConfig.Filter.Asc, fmt.Sprintf("%v%v", prefix, "filter.asc"), DefaultConfig.Filter.Asc, "Specifies the sorting order. 
By default flytectl sort result in descending order") + cmdFlags.Int32Var(&DefaultConfig.Filter.Page, fmt.Sprintf("%v%v", prefix, "filter.page"), DefaultConfig.Filter.Page, "Specifies the page number, in case there are multiple pages of results") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/project/config_flags_test.go b/flytectl/cmd/config/subcommand/project/config_flags_test.go new file mode 100755 index 0000000000..78bd4ca726 --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/config_flags_test.go @@ -0,0 +1,172 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package project + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_filter.fieldSelector", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.fieldSelector", testValue) + if vString, err := cmdFlags.GetString("filter.fieldSelector"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.FieldSelector) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.sortBy", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.sortBy", testValue) + if vString, err := 
cmdFlags.GetString("filter.sortBy"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.SortBy) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.limit", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.limit", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.limit"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Limit) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.asc", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.asc", testValue) + if vBool, err := cmdFlags.GetBool("filter.asc"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Filter.Asc) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.page", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.page", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.page"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Page) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/project/configproject_flags.go b/flytectl/cmd/config/subcommand/project/configproject_flags.go new file mode 100755 index 0000000000..6de8107e76 --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/configproject_flags.go @@ -0,0 +1,65 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package project + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (ConfigProject) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (ConfigProject) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (ConfigProject) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in ConfigProject and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg ConfigProject) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("ConfigProject", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultProjectConfig.ID, fmt.Sprintf("%v%v", prefix, "id"), DefaultProjectConfig.ID, "id for the project specified as argument.") + cmdFlags.BoolVar(&DefaultProjectConfig.ActivateProject, fmt.Sprintf("%v%v", prefix, "activateProject"), DefaultProjectConfig.ActivateProject, "(Deprecated) Activates the project specified as argument. Only used in update") + cmdFlags.BoolVar(&DefaultProjectConfig.ArchiveProject, fmt.Sprintf("%v%v", prefix, "archiveProject"), DefaultProjectConfig.ArchiveProject, "(Deprecated) Archives the project specified as argument. Only used in update") + cmdFlags.BoolVar(&DefaultProjectConfig.Activate, fmt.Sprintf("%v%v", prefix, "activate"), DefaultProjectConfig.Activate, "Activates the project specified as argument. Only used in update") + cmdFlags.BoolVar(&DefaultProjectConfig.Archive, fmt.Sprintf("%v%v", prefix, "archive"), DefaultProjectConfig.Archive, "Archives the project specified as argument. 
Only used in update") + cmdFlags.BoolVar(&DefaultProjectConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultProjectConfig.Force, "Skips asking for an acknowledgement during an update operation. Only used in update") + cmdFlags.StringVar(&DefaultProjectConfig.Name, fmt.Sprintf("%v%v", prefix, "name"), DefaultProjectConfig.Name, "name for the project specified as argument.") + cmdFlags.BoolVar(&DefaultProjectConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultProjectConfig.DryRun, "execute command without making any modifications.") + cmdFlags.StringVar(&DefaultProjectConfig.Description, fmt.Sprintf("%v%v", prefix, "description"), DefaultProjectConfig.Description, "description for the project specified as argument.") + cmdFlags.StringToStringVar(&DefaultProjectConfig.Labels, fmt.Sprintf("%v%v", prefix, "labels"), DefaultProjectConfig.Labels, "labels for the project specified as argument.") + cmdFlags.StringVar(&DefaultProjectConfig.File, fmt.Sprintf("%v%v", prefix, "file"), DefaultProjectConfig.File, "file for the project definition.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/project/configproject_flags_test.go b/flytectl/cmd/config/subcommand/project/configproject_flags_test.go new file mode 100755 index 0000000000..98847d779a --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/configproject_flags_test.go @@ -0,0 +1,256 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package project + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfigProject = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. 
+func canGetElementConfigProject(t reflect.Kind) bool { + _, exists := dereferencableKindsConfigProject[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfigProject(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfigProject(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_ConfigProject(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfigProject, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_ConfigProject(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_ConfigProject(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_ConfigProject(val, result)) +} + +func 
testDecodeRaw_ConfigProject(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_ConfigProject(vStringSlice, result)) +} + +func TestConfigProject_GetPFlagSet(t *testing.T) { + val := ConfigProject{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfigProject_SetFlags(t *testing.T) { + actual := ConfigProject{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_id", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("id", testValue) + if vString, err := cmdFlags.GetString("id"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vString), &actual.ID) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_activateProject", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("activateProject", testValue) + if vBool, err := cmdFlags.GetBool("activateProject"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vBool), &actual.ActivateProject) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_archiveProject", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("archiveProject", testValue) + if vBool, err := cmdFlags.GetBool("archiveProject"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vBool), &actual.ArchiveProject) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_activate", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("activate", testValue) + if vBool, err := cmdFlags.GetBool("activate"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vBool), &actual.Activate) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_archive", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("archive", testValue) + if vBool, err := cmdFlags.GetBool("archive"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vBool), &actual.Archive) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("name", testValue) + if vString, err := cmdFlags.GetString("name"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vString), &actual.Name) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_description", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("description", testValue) + if vString, err := cmdFlags.GetString("description"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vString), &actual.Description) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_labels", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "a=1,b=2" + + cmdFlags.Set("labels", testValue) + if vStringToString, err := cmdFlags.GetStringToString("labels"); err == nil { + testDecodeRaw_ConfigProject(t, vStringToString, &actual.Labels) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_file", func(t 
*testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("file", testValue) + if vString, err := cmdFlags.GetString("file"); err == nil { + testDecodeJson_ConfigProject(t, fmt.Sprintf("%v", vString), &actual.File) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/project/project_config.go b/flytectl/cmd/config/subcommand/project/project_config.go new file mode 100644 index 0000000000..794f661df9 --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/project_config.go @@ -0,0 +1,115 @@ +package project + +import ( + "fmt" + "io/ioutil" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/pkg/filters" + + "gopkg.in/yaml.v3" +) + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{ + Filter: filters.DefaultFilter, + } +) + +// Config holds the flag for get project +type Config struct { + Filter filters.Filters `json:"filter" pflag:","` +} + +//go:generate pflags ConfigProject --default-var DefaultProjectConfig --bind-default-var + +// ConfigProject hold configuration for project update flags. +type ConfigProject struct { + ID string `json:"id" pflag:",id for the project specified as argument."` + ActivateProject bool `json:"activateProject" pflag:",(Deprecated) Activates the project specified as argument. Only used in update"` + ArchiveProject bool `json:"archiveProject" pflag:",(Deprecated) Archives the project specified as argument. Only used in update"` + Activate bool `json:"activate" pflag:",Activates the project specified as argument. Only used in update"` + Archive bool `json:"archive" pflag:",Archives the project specified as argument. Only used in update"` + Force bool `json:"force" pflag:",Skips asking for an acknowledgement during an update operation. 
Only used in update"` + Name string `json:"name" pflag:",name for the project specified as argument."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Description string `json:"description" pflag:",description for the project specified as argument."` + Labels map[string]string `json:"labels" pflag:",labels for the project specified as argument."` + File string `json:"file" pflag:",file for the project definition."` +} + +var DefaultProjectConfig = &ConfigProject{ + Description: "", + Labels: map[string]string{}, +} + +// GetProjectSpec return project spec from a file/flags +func (c *ConfigProject) GetProjectSpec(cf *config.Config) (*admin.Project, error) { + projectSpec := admin.Project{} + + if len(c.File) > 0 { + yamlFile, err := ioutil.ReadFile(c.File) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(yamlFile, &projectSpec) + if err != nil { + return nil, err + } + } else { + projectSpec.Id = c.ID + projectSpec.Name = c.Name + projectSpec.Description = c.Description + projectSpec.Labels = &admin.Labels{ + Values: c.Labels, + } + projectState, err := c.MapToAdminState() + if err != nil { + return nil, err + } + projectSpec.State = projectState + } + + project := cf.Project + if len(projectSpec.Id) == 0 && len(project) == 0 { + err := fmt.Errorf(clierrors.ErrProjectNotPassed) + return nil, err + } + + if len(projectSpec.Id) > 0 && len(project) > 0 { + err := fmt.Errorf(clierrors.ErrProjectIDBothPassed) + return nil, err + } + + // Get projectId from file, if not provided, fall back to project + if len(projectSpec.Id) == 0 { + projectSpec.Id = project + } + return &projectSpec, nil +} + +// MapToAdminState return project spec from a file/flags +func (c *ConfigProject) MapToAdminState() (admin.Project_ProjectState, error) { + if c.ActivateProject { + c.Activate = c.ActivateProject + } + if c.ArchiveProject { + c.Archive = c.ArchiveProject + } + + activate := c.Activate + archive := c.Archive + + if activate || 
archive { + if activate == archive { + return admin.Project_ACTIVE, fmt.Errorf(clierrors.ErrInvalidStateUpdate) + } + if archive { + return admin.Project_ARCHIVED, nil + } + } + return admin.Project_ACTIVE, nil +} diff --git a/flytectl/cmd/config/subcommand/project/project_config_test.go b/flytectl/cmd/config/subcommand/project/project_config_test.go new file mode 100644 index 0000000000..69a8bf4acb --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/project_config_test.go @@ -0,0 +1,82 @@ +package project + +import ( + "errors" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + + "github.com/stretchr/testify/assert" +) + +func TestGetProjectSpec(t *testing.T) { + cf := &config.Config{ + Project: "flytesnacks1", + } + t.Run("Successful get project spec", func(t *testing.T) { + c := &ConfigProject{ + Name: "flytesnacks", + } + response, err := c.GetProjectSpec(cf) + assert.Nil(t, err) + assert.Equal(t, "flytesnacks1", response.Id) + }) + + t.Run("Error if project and ID both exist", func(t *testing.T) { + c := &ConfigProject{ + ID: "flytesnacks", + Name: "flytesnacks", + } + _, err := c.GetProjectSpec(cf) + assert.NotNil(t, err) + }) + + t.Run("Successful get request spec from file", func(t *testing.T) { + c := &ConfigProject{ + File: "testdata/project.yaml", + } + response, err := c.GetProjectSpec(&config.Config{}) + assert.Nil(t, err) + assert.Equal(t, "flytesnacks", response.Name) + assert.Equal(t, "flytesnacks test", response.Description) + }) +} + +func TestMapToAdminState(t *testing.T) { + t.Run("Successful mapToAdminState with archive", func(t *testing.T) { + c := &ConfigProject{ + Archive: true, + } + state, err := c.MapToAdminState() + assert.Nil(t, err) + assert.Equal(t, admin.Project_ARCHIVED, state) + }) + t.Run("Successful mapToAdminState with activate", func(t *testing.T) { + c := &ConfigProject{ + Activate: true, + } + state, err 
:= c.MapToAdminState() + assert.Nil(t, err) + assert.Equal(t, admin.Project_ACTIVE, state) + }) + t.Run("Invalid state", func(t *testing.T) { + c := &ConfigProject{ + Activate: true, + Archive: true, + } + state, err := c.MapToAdminState() + assert.NotNil(t, err) + assert.Equal(t, errors.New(clierrors.ErrInvalidStateUpdate), err) + assert.Equal(t, admin.Project_ACTIVE, state) + }) + t.Run("deprecated Flags Test", func(t *testing.T) { + c := &ConfigProject{ + ActivateProject: true, + } + state, err := c.MapToAdminState() + assert.Nil(t, err) + assert.Equal(t, admin.Project_ACTIVE, state) + }) +} diff --git a/flytectl/cmd/config/subcommand/project/testdata/project.yaml b/flytectl/cmd/config/subcommand/project/testdata/project.yaml new file mode 100644 index 0000000000..8b5fd32841 --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/testdata/project.yaml @@ -0,0 +1,14 @@ +description: flytesnacks test +domains: + - id: development + name: development + - id: staging + name: staging + - id: production + name: production +id: flytesnacks +labels: + values: + team: flytesnacks +name: flytesnacks + diff --git a/flytectl/cmd/config/subcommand/project/testdata/project_error.yaml b/flytectl/cmd/config/subcommand/project/testdata/project_error.yaml new file mode 100644 index 0000000000..0b7088ec63 --- /dev/null +++ b/flytectl/cmd/config/subcommand/project/testdata/project_error.yaml @@ -0,0 +1 @@ +Data diff --git a/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go b/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go new file mode 100644 index 0000000000..ee77f9ca3f --- /dev/null +++ b/flytectl/cmd/config/subcommand/project_domain_workflow_getter.go @@ -0,0 +1,40 @@ +package subcommand + +import rootConfig "github.com/flyteorg/flytectl/cmd/config" + +// ProjectDomainWorkflowGetter defines a interface for getting the project domain workflow. 
+type ProjectDomainWorkflowGetter interface { + GetProject() string + GetDomain() string + GetWorkflow() string +} + +// PDWGetterCommandLine implements the command line way of getting project domain and workflow +type PDWGetterCommandLine struct { + Config *rootConfig.Config + Args []string +} + +// GetProject returns the cobra parsed Project from the Config +func (g PDWGetterCommandLine) GetProject() string { + if g.Config == nil { + return "" + } + return g.Config.Project +} + +// GetDomain returns the cobra parsed Domain from the Config +func (g PDWGetterCommandLine) GetDomain() string { + if g.Config == nil { + return "" + } + return g.Config.Domain +} + +// GetWorkflow returns the first argument from the commandline +func (g PDWGetterCommandLine) GetWorkflow() string { + if g.Args == nil || len(g.Args) == 0 { + return "" + } + return g.Args[0] +} diff --git a/flytectl/cmd/config/subcommand/register/files_config.go b/flytectl/cmd/config/subcommand/register/files_config.go new file mode 100644 index 0000000000..c1441d0567 --- /dev/null +++ b/flytectl/cmd/config/subcommand/register/files_config.go @@ -0,0 +1,34 @@ +package register + +import "github.com/flyteorg/flyte/flytestdlib/config" + +//go:generate pflags FilesConfig --default-var DefaultFilesConfig --bind-default-var + +var ( + DefaultFilesConfig = &FilesConfig{ + Version: "", + ContinueOnError: false, + } + + cfg = config.MustRegisterSection("files", DefaultFilesConfig) +) + +// FilesConfig containing flags used for registration +type FilesConfig struct { + Version string `json:"version" pflag:",Version of the entity to be registered with flyte which are un-versioned after serialization."` + Force bool `json:"force" pflag:",Force use of version number on entities registered with flyte."` + ContinueOnError bool `json:"continueOnError" pflag:",Continue on error when registering files."` + Archive bool `json:"archive" pflag:",Pass in archive file either an http link or local path."` + AssumableIamRole 
string `json:"assumableIamRole" pflag:",Custom assumable iam auth role to register launch plans with."` + K8sServiceAccount string `json:"k8sServiceAccount" pflag:",Custom kubernetes service account auth role to register launch plans with."` + K8ServiceAccount string `json:"k8ServiceAccount" pflag:",Deprecated. Please use --K8sServiceAccount"` + OutputLocationPrefix string `json:"outputLocationPrefix" pflag:",Custom output location prefix for offloaded types (files/schemas)."` + DeprecatedSourceUploadPath string `json:"sourceUploadPath" pflag:",Deprecated: Update flyte admin to avoid having to configure storage access from flytectl."` + DestinationDirectory string `json:"destinationDirectory" pflag:",Location of source code in container."` + DryRun bool `json:"dryRun" pflag:",Execute command without making any modifications."` + EnableSchedule bool `json:"enableSchedule" pflag:",Enable the schedule if the files contain schedulable launchplan."` +} + +func GetConfig() *FilesConfig { + return cfg.GetConfig().(*FilesConfig) +} diff --git a/flytectl/cmd/config/subcommand/register/filesconfig_flags.go b/flytectl/cmd/config/subcommand/register/filesconfig_flags.go new file mode 100755 index 0000000000..10d4756cfb --- /dev/null +++ b/flytectl/cmd/config/subcommand/register/filesconfig_flags.go @@ -0,0 +1,66 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package register + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (FilesConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (FilesConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (FilesConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in FilesConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg FilesConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("FilesConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFilesConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), DefaultFilesConfig.Version, "Version of the entity to be registered with flyte which are un-versioned after serialization.") + cmdFlags.BoolVar(&DefaultFilesConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultFilesConfig.Force, "Force use of version number on entities registered with flyte.") + cmdFlags.BoolVar(&DefaultFilesConfig.ContinueOnError, fmt.Sprintf("%v%v", prefix, "continueOnError"), DefaultFilesConfig.ContinueOnError, "Continue on error when registering files.") + cmdFlags.BoolVar(&DefaultFilesConfig.Archive, fmt.Sprintf("%v%v", prefix, "archive"), DefaultFilesConfig.Archive, "Pass in archive file either an http link or local path.") + cmdFlags.StringVar(&DefaultFilesConfig.AssumableIamRole, fmt.Sprintf("%v%v", prefix, "assumableIamRole"), DefaultFilesConfig.AssumableIamRole, "Custom assumable iam auth role to register launch plans with.") + cmdFlags.StringVar(&DefaultFilesConfig.K8sServiceAccount, fmt.Sprintf("%v%v", prefix, 
"k8sServiceAccount"), DefaultFilesConfig.K8sServiceAccount, "Custom kubernetes service account auth role to register launch plans with.") + cmdFlags.StringVar(&DefaultFilesConfig.K8ServiceAccount, fmt.Sprintf("%v%v", prefix, "k8ServiceAccount"), DefaultFilesConfig.K8ServiceAccount, "Deprecated. Please use --K8sServiceAccount") + cmdFlags.StringVar(&DefaultFilesConfig.OutputLocationPrefix, fmt.Sprintf("%v%v", prefix, "outputLocationPrefix"), DefaultFilesConfig.OutputLocationPrefix, "Custom output location prefix for offloaded types (files/schemas).") + cmdFlags.StringVar(&DefaultFilesConfig.DeprecatedSourceUploadPath, fmt.Sprintf("%v%v", prefix, "sourceUploadPath"), DefaultFilesConfig.DeprecatedSourceUploadPath, "Deprecated: Update flyte admin to avoid having to configure storage access from flytectl.") + cmdFlags.StringVar(&DefaultFilesConfig.DestinationDirectory, fmt.Sprintf("%v%v", prefix, "destinationDirectory"), DefaultFilesConfig.DestinationDirectory, "Location of source code in container.") + cmdFlags.BoolVar(&DefaultFilesConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultFilesConfig.DryRun, "Execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultFilesConfig.EnableSchedule, fmt.Sprintf("%v%v", prefix, "enableSchedule"), DefaultFilesConfig.EnableSchedule, "Enable the schedule if the files contain schedulable launchplan.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/register/filesconfig_flags_test.go b/flytectl/cmd/config/subcommand/register/filesconfig_flags_test.go new file mode 100755 index 0000000000..14fbc00ed2 --- /dev/null +++ b/flytectl/cmd/config/subcommand/register/filesconfig_flags_test.go @@ -0,0 +1,270 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package register + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsFilesConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementFilesConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsFilesConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookFilesConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementFilesConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_FilesConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookFilesConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_FilesConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_FilesConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_FilesConfig(val, result)) +} + +func testDecodeRaw_FilesConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_FilesConfig(vStringSlice, result)) +} + +func TestFilesConfig_GetPFlagSet(t *testing.T) { + val := FilesConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestFilesConfig_SetFlags(t *testing.T) { + actual := FilesConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == 
nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_continueOnError", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("continueOnError", testValue) + if vBool, err := cmdFlags.GetBool("continueOnError"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vBool), &actual.ContinueOnError) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_archive", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("archive", testValue) + if vBool, err := cmdFlags.GetBool("archive"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vBool), &actual.Archive) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_assumableIamRole", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("assumableIamRole", testValue) + if vString, err := cmdFlags.GetString("assumableIamRole"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.AssumableIamRole) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_k8sServiceAccount", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("k8sServiceAccount", testValue) + if vString, err := cmdFlags.GetString("k8sServiceAccount"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.K8sServiceAccount) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_k8ServiceAccount", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("k8ServiceAccount", testValue) + if vString, err := cmdFlags.GetString("k8ServiceAccount"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.K8ServiceAccount) + + } else { + assert.FailNow(t, err.Error()) + 
} + }) + }) + t.Run("Test_outputLocationPrefix", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("outputLocationPrefix", testValue) + if vString, err := cmdFlags.GetString("outputLocationPrefix"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.OutputLocationPrefix) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_sourceUploadPath", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("sourceUploadPath", testValue) + if vString, err := cmdFlags.GetString("sourceUploadPath"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.DeprecatedSourceUploadPath) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_destinationDirectory", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("destinationDirectory", testValue) + if vString, err := cmdFlags.GetString("destinationDirectory"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vString), &actual.DestinationDirectory) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_enableSchedule", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("enableSchedule", testValue) + if vBool, err := cmdFlags.GetBool("enableSchedule"); err == nil { + testDecodeJson_FilesConfig(t, fmt.Sprintf("%v", vBool), &actual.EnableSchedule) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/sandbox/config_flags.go 
b/flytectl/cmd/config/subcommand/sandbox/config_flags.go new file mode 100755 index 0000000000..32e1423057 --- /dev/null +++ b/flytectl/cmd/config/subcommand/sandbox/config_flags.go @@ -0,0 +1,66 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package sandbox + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.DeprecatedSource, fmt.Sprintf("%v%v", prefix, "source"), DefaultConfig.DeprecatedSource, "deprecated, path of your source code, please build images with local daemon") + cmdFlags.StringVar(&DefaultConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), DefaultConfig.Version, "Version of flyte. Only supports flyte releases greater than v0.10.0") + cmdFlags.StringVar(&DefaultConfig.Image, fmt.Sprintf("%v%v", prefix, "image"), DefaultConfig.Image, "Optional. 
Provide a fully qualified path to a Flyte compliant docker image.") + cmdFlags.BoolVar(&DefaultConfig.Prerelease, fmt.Sprintf("%v%v", prefix, "pre"), DefaultConfig.Prerelease, "Optional. Pre release Version of flyte will be used for sandbox.") + cmdFlags.BoolVar(&DefaultConfig.DisableAgent, fmt.Sprintf("%v%v", prefix, "disable-agent"), DefaultConfig.DisableAgent, "Optional. Disable the agent service.") + cmdFlags.StringSliceVar(&DefaultConfig.Env, fmt.Sprintf("%v%v", prefix, "env"), DefaultConfig.Env, "Optional. Provide Env variable in key=value format which can be passed to sandbox container.") + cmdFlags.Var(&DefaultConfig.ImagePullPolicy, fmt.Sprintf("%v%v", prefix, "imagePullPolicy"), "Optional. Defines the image pull behavior [Always/IfNotPresent/Never]") + cmdFlags.StringVar(&DefaultConfig.ImagePullOptions.RegistryAuth, fmt.Sprintf("%v%v", prefix, "imagePullOptions.registryAuth"), DefaultConfig.ImagePullOptions.RegistryAuth, "The base64 encoded credentials for the registry.") + cmdFlags.StringVar(&DefaultConfig.ImagePullOptions.Platform, fmt.Sprintf("%v%v", prefix, "imagePullOptions.platform"), DefaultConfig.ImagePullOptions.Platform, "Forces a specific platform's image to be pulled.'") + cmdFlags.BoolVar(&DefaultConfig.Dev, fmt.Sprintf("%v%v", prefix, "dev"), DefaultConfig.Dev, "Optional. Only start minio and postgres in the sandbox.") + cmdFlags.BoolVar(&DefaultConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultConfig.DryRun, "Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use'") + cmdFlags.BoolVar(&DefaultConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultConfig.Force, "Optional. 
Forcefully delete existing sandbox cluster if it exists.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go b/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go new file mode 100755 index 0000000000..8519a75583 --- /dev/null +++ b/flytectl/cmd/config/subcommand/sandbox/config_flags_test.go @@ -0,0 +1,268 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package sandbox + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_source", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("source", testValue) + if vString, err := cmdFlags.GetString("source"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.DeprecatedSource) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_Config(t, 
fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_image", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("image", testValue) + if vString, err := cmdFlags.GetString("image"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Image) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_pre", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("pre", testValue) + if vBool, err := cmdFlags.GetBool("pre"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Prerelease) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_disable-agent", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("disable-agent", testValue) + if vBool, err := cmdFlags.GetBool("disable-agent"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.DisableAgent) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_env", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := join_Config(DefaultConfig.Env, ",") + + cmdFlags.Set("env", testValue) + if vStringSlice, err := cmdFlags.GetStringSlice("env"); err == nil { + testDecodeRaw_Config(t, join_Config(vStringSlice, ","), &actual.Env) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_imagePullPolicy", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("imagePullPolicy", testValue) + if v := cmdFlags.Lookup("imagePullPolicy"); v != nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", v.Value.String()), &actual.ImagePullPolicy) + + } + }) + }) + t.Run("Test_imagePullOptions.registryAuth", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("imagePullOptions.registryAuth", testValue) + if vString, err := cmdFlags.GetString("imagePullOptions.registryAuth"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.ImagePullOptions.RegistryAuth) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_imagePullOptions.platform", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("imagePullOptions.platform", testValue) + if vString, err := cmdFlags.GetString("imagePullOptions.platform"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.ImagePullOptions.Platform) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dev", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dev", testValue) + if vBool, err := cmdFlags.GetBool("dev"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Dev) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go b/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go new file mode 100644 index 0000000000..f566de0118 --- /dev/null +++ b/flytectl/cmd/config/subcommand/sandbox/sandbox_config.go @@ -0,0 +1,44 @@ +package sandbox + +import 
"github.com/flyteorg/flytectl/pkg/docker" + +// Config holds configuration flags for sandbox command. +type Config struct { + DeprecatedSource string `json:"source" pflag:",deprecated, path of your source code, please build images with local daemon"` + + // Flytectl sandbox only supports Flyte version available in Github release https://github.com/flyteorg/flyte/tags. + // Flytectl sandbox will only work for v0.10.0+. + // Default value dind represents the latest release. + Version string `json:"version" pflag:",Version of flyte. Only supports flyte releases greater than v0.10.0"` + + // Optionally it is possible to specify a specific fqn for the docker image with the tag. This should be + // Flyte compliant sandbox image. Usually useful, if you want to push the image to your own registry and relaunch + // from there. + Image string `json:"image" pflag:",Optional. Provide a fully qualified path to a Flyte compliant docker image."` + + // Default value false represents that Flytectl will not use the latest pre-release if it exists. + Prerelease bool `json:"pre" pflag:",Optional. Pre release Version of flyte will be used for sandbox."` + + // Agent Service + DisableAgent bool `json:"disable-agent" pflag:",Optional. Disable the agent service."` + + // Optionally it is possible to pass in environment variables to sandbox container. + Env []string `json:"env" pflag:",Optional. Provide Env variable in key=value format which can be passed to sandbox container."` + + // Optionally it is possible to use local sandbox image + // Flytectl will not pull the image from the registry if the local flag passes. It is usually useful while testing your local images without pushing them to a registry. + ImagePullPolicy docker.ImagePullPolicy `json:"imagePullPolicy" pflag:",Optional. Defines the image pull behavior [Always/IfNotPresent/Never]"` + + ImagePullOptions docker.ImagePullOptions `json:"imagePullOptions" pflag:",Optional. Defines image pull options (e.g. 
auth)"` + + // It's used for development. Users are able to start flyte locally via single binary and save the data to the minio or postgres in the sandbox. + Dev bool `json:"dev" pflag:",Optional. Only start minio and postgres in the sandbox."` + DryRun bool `json:"dryRun" pflag:",Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use'"` + + Force bool `json:"force" pflag:",Optional. Forcefully delete existing sandbox cluster if it exists."` +} + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{} +) diff --git a/flytectl/cmd/config/subcommand/sandbox/teardown.go b/flytectl/cmd/config/subcommand/sandbox/teardown.go new file mode 100644 index 0000000000..0e315d6bd0 --- /dev/null +++ b/flytectl/cmd/config/subcommand/sandbox/teardown.go @@ -0,0 +1,21 @@ +package sandbox + +import ( + "fmt" + + "github.com/spf13/pflag" +) + +type TeardownFlags struct { + Volume bool +} + +var ( + DefaultTeardownFlags = &TeardownFlags{} +) + +func (f *TeardownFlags) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("TeardownFlags", pflag.ExitOnError) + cmdFlags.BoolVarP(&f.Volume, fmt.Sprintf("%v%v", prefix, "volume"), "v", f.Volume, "Optional. Clean up Docker volume. This will result in a permanent loss of all data within the database and object store. Use with caution!") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/task/config_flags.go b/flytectl/cmd/config/subcommand/task/config_flags.go new file mode 100755 index 0000000000..ee181f076f --- /dev/null +++ b/flytectl/cmd/config/subcommand/task/config_flags.go @@ -0,0 +1,62 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package task + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.ExecFile, fmt.Sprintf("%v%v", prefix, "execFile"), DefaultConfig.ExecFile, "execution file name to be used for generating execution spec of a single task.") + cmdFlags.StringVar(&DefaultConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), DefaultConfig.Version, "version of the task to be fetched.") + cmdFlags.BoolVar(&DefaultConfig.Latest, fmt.Sprintf("%v%v", prefix, "latest"), DefaultConfig.Latest, " flag to indicate to fetch the latest version, version flag will be ignored in this case") + cmdFlags.StringVar(&DefaultConfig.Filter.FieldSelector, fmt.Sprintf("%v%v", prefix, "filter.fieldSelector"), DefaultConfig.Filter.FieldSelector, "Specifies the Field selector") + cmdFlags.StringVar(&DefaultConfig.Filter.SortBy, fmt.Sprintf("%v%v", prefix, "filter.sortBy"), DefaultConfig.Filter.SortBy, "Specifies which field to sort results ") + cmdFlags.Int32Var(&DefaultConfig.Filter.Limit, fmt.Sprintf("%v%v", prefix, "filter.limit"), DefaultConfig.Filter.Limit, "Specifies the limit") + cmdFlags.BoolVar(&DefaultConfig.Filter.Asc, fmt.Sprintf("%v%v", prefix, "filter.asc"), DefaultConfig.Filter.Asc, "Specifies the sorting order. By default flytectl sort result in descending order") + cmdFlags.Int32Var(&DefaultConfig.Filter.Page, fmt.Sprintf("%v%v", prefix, "filter.page"), DefaultConfig.Filter.Page, "Specifies the page number, in case there are multiple pages of results") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/task/config_flags_test.go b/flytectl/cmd/config/subcommand/task/config_flags_test.go new file mode 100755 index 0000000000..52651e144c --- /dev/null +++ b/flytectl/cmd/config/subcommand/task/config_flags_test.go @@ -0,0 +1,214 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package task + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_execFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("execFile", testValue) + if vString, err := cmdFlags.GetString("execFile"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.ExecFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_Config(t, 
fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_latest", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("latest", testValue) + if vBool, err := cmdFlags.GetBool("latest"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Latest) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.fieldSelector", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.fieldSelector", testValue) + if vString, err := cmdFlags.GetString("filter.fieldSelector"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.FieldSelector) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.sortBy", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.sortBy", testValue) + if vString, err := cmdFlags.GetString("filter.sortBy"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.SortBy) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.limit", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.limit", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.limit"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Limit) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.asc", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.asc", testValue) + if vBool, err := cmdFlags.GetBool("filter.asc"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Filter.Asc) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.page", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) 
{ + testValue := "1" + + cmdFlags.Set("filter.page", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.page"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Page) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/task/task_config.go b/flytectl/cmd/config/subcommand/task/task_config.go new file mode 100644 index 0000000000..1e0d6a2bec --- /dev/null +++ b/flytectl/cmd/config/subcommand/task/task_config.go @@ -0,0 +1,18 @@ +package task + +import "github.com/flyteorg/flytectl/pkg/filters" + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var +var ( + DefaultConfig = &Config{ + Filter: filters.DefaultFilter, + } +) + +// Config +type Config struct { + ExecFile string `json:"execFile" pflag:",execution file name to be used for generating execution spec of a single task."` + Version string `json:"version" pflag:",version of the task to be fetched."` + Latest bool `json:"latest" pflag:", flag to indicate to fetch the latest version, version flag will be ignored in this case"` + Filter filters.Filters `json:"filter" pflag:","` +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/attrdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/taskresourceattribute/attrdeleteconfig_flags.go new file mode 100755 index 0000000000..2989a37b03 --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/attrdeleteconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package taskresourceattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrDeleteConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrDeleteConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultDelConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultDelConfig.AttrFile, "attribute file name to be used for delete attribute for the resource type.") + cmdFlags.BoolVar(&DefaultDelConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultDelConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/attrdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/taskresourceattribute/attrdeleteconfig_flags_test.go new file mode 100755 index 0000000000..d4211ad7cf --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/attrdeleteconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package taskresourceattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(val, result)) +} + +func testDecodeRaw_AttrDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(vStringSlice, result)) +} + +func TestAttrDeleteConfig_GetPFlagSet(t *testing.T) { + val := AttrDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrDeleteConfig_SetFlags(t *testing.T) { + actual := AttrDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", 
testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/attrfetchconfig_flags.go b/flytectl/cmd/config/subcommand/taskresourceattribute/attrfetchconfig_flags.go new file mode 100755 index 0000000000..0efc338768 --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/attrfetchconfig_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package taskresourceattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrFetchConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrFetchConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrFetchConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrFetchConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrFetchConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrFetchConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFetchConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultFetchConfig.AttrFile, "attribute file name to be used for generating attribute for the resource type.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/attrfetchconfig_flags_test.go b/flytectl/cmd/config/subcommand/taskresourceattribute/attrfetchconfig_flags_test.go new file mode 100755 index 0000000000..038f8a893c --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/attrfetchconfig_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package taskresourceattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrFetchConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrFetchConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrFetchConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrFetchConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrFetchConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. 
Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrFetchConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrFetchConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrFetchConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrFetchConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(val, result)) +} + +func testDecodeRaw_AttrFetchConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(vStringSlice, result)) +} + +func TestAttrFetchConfig_GetPFlagSet(t *testing.T) { + val := AttrFetchConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrFetchConfig_SetFlags(t *testing.T) { + actual := AttrFetchConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vString), 
&actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/attrupdateconfig_flags.go b/flytectl/cmd/config/subcommand/taskresourceattribute/attrupdateconfig_flags.go new file mode 100755 index 0000000000..402add1c0d --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/attrupdateconfig_flags.go @@ -0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package taskresourceattribute + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrUpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrUpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrUpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrUpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrUpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrUpdateConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultUpdateConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultUpdateConfig.AttrFile, "attribute file name to be used for updating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultUpdateConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultUpdateConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultUpdateConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultUpdateConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/attrupdateconfig_flags_test.go b/flytectl/cmd/config/subcommand/taskresourceattribute/attrupdateconfig_flags_test.go new file mode 100755 index 0000000000..aaf429a732 --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/attrupdateconfig_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package taskresourceattribute + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrUpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrUpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrUpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(val, result)) +} + +func testDecodeRaw_AttrUpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(vStringSlice, result)) +} + +func TestAttrUpdateConfig_GetPFlagSet(t *testing.T) { + val := AttrUpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, 
cmdFlags.HasFlags()) +} + +func TestAttrUpdateConfig_SetFlags(t *testing.T) { + actual := AttrUpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/delete_config.go b/flytectl/cmd/config/subcommand/taskresourceattribute/delete_config.go new file mode 100644 index 0000000000..6938c2032b --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/delete_config.go @@ -0,0 +1,11 @@ +package taskresourceattribute + +//go:generate pflags AttrDeleteConfig --default-var DefaultDelConfig --bind-default-var + +// AttrDeleteConfig Matchable resource attributes configuration passed from command line +type AttrDeleteConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for delete attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` +} + 
+var DefaultDelConfig = &AttrDeleteConfig{} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/fetch_config.go b/flytectl/cmd/config/subcommand/taskresourceattribute/fetch_config.go new file mode 100644 index 0000000000..1fe6de9937 --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/fetch_config.go @@ -0,0 +1,9 @@ +package taskresourceattribute + +//go:generate pflags AttrFetchConfig --default-var DefaultFetchConfig --bind-default-var + +type AttrFetchConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for generating attribute for the resource type."` +} + +var DefaultFetchConfig = &AttrFetchConfig{} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/file_config.go b/flytectl/cmd/config/subcommand/taskresourceattribute/file_config.go new file mode 100644 index 0000000000..9629a71ee5 --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/file_config.go @@ -0,0 +1,47 @@ +package taskresourceattribute + +import ( + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +// TaskResourceAttrFileConfig shadow Config for TaskResourceAttribute. +// The shadow Config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. +// As the same structure is being used for both ProjectDomainAttribute/Workflowattribute +type TaskResourceAttrFileConfig struct { + Project string `json:"project"` + Domain string `json:"domain"` + Workflow string `json:"workflow,omitempty"` + *admin.TaskResourceAttributes +} + +// Decorate decorator over TaskResourceAttributes. +func (t TaskResourceAttrFileConfig) Decorate() *admin.MatchingAttributes { + return &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: t.TaskResourceAttributes, + }, + } +} + +// UnDecorate to uncover TaskResourceAttributes. 
+func (t *TaskResourceAttrFileConfig) UnDecorate(matchingAttribute *admin.MatchingAttributes) { + if matchingAttribute == nil { + return + } + t.TaskResourceAttributes = matchingAttribute.GetTaskResourceAttributes() +} + +// GetProject from the TaskResourceAttrFileConfig +func (t TaskResourceAttrFileConfig) GetProject() string { + return t.Project +} + +// GetDomain from the TaskResourceAttrFileConfig +func (t TaskResourceAttrFileConfig) GetDomain() string { + return t.Domain +} + +// GetWorkflow from the TaskResourceAttrFileConfig +func (t TaskResourceAttrFileConfig) GetWorkflow() string { + return t.Workflow +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/file_config_test.go b/flytectl/cmd/config/subcommand/taskresourceattribute/file_config_test.go new file mode 100644 index 0000000000..cba5ea672b --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/file_config_test.go @@ -0,0 +1,53 @@ +package taskresourceattribute + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" +) + +func TestFileConfig(t *testing.T) { + taskAttrFileConfig := TaskResourceAttrFileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + TaskResourceAttributes: &admin.TaskResourceAttributes{ + Defaults: &admin.TaskResourceSpec{ + Cpu: "1", + Memory: "150Mi", + }, + Limits: &admin.TaskResourceSpec{ + Cpu: "2", + Memory: "350Mi", + }, + }, + } + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: taskAttrFileConfig.TaskResourceAttributes, + }, + } + t.Run("decorate", func(t *testing.T) { + assert.Equal(t, matchingAttr, taskAttrFileConfig.Decorate()) + }) + + t.Run("decorate", func(t *testing.T) { + taskAttrFileConfigNew := TaskResourceAttrFileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + } + taskAttrFileConfigNew.UnDecorate(matchingAttr) + assert.Equal(t, taskAttrFileConfig, 
taskAttrFileConfigNew) + }) + t.Run("get project domain workflow", func(t *testing.T) { + taskAttrFileConfigNew := TaskResourceAttrFileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + Workflow: "workflow", + } + assert.Equal(t, "dummyProject", taskAttrFileConfigNew.GetProject()) + assert.Equal(t, "dummyDomain", taskAttrFileConfigNew.GetDomain()) + assert.Equal(t, "workflow", taskAttrFileConfigNew.GetWorkflow()) + }) +} diff --git a/flytectl/cmd/config/subcommand/taskresourceattribute/update_config.go b/flytectl/cmd/config/subcommand/taskresourceattribute/update_config.go new file mode 100644 index 0000000000..4ee836e433 --- /dev/null +++ b/flytectl/cmd/config/subcommand/taskresourceattribute/update_config.go @@ -0,0 +1,12 @@ +package taskresourceattribute + +//go:generate pflags AttrUpdateConfig --default-var DefaultUpdateConfig --bind-default-var + +// AttrUpdateConfig Matchable resource attributes configuration passed from command line +type AttrUpdateConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for updating attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} + +var DefaultUpdateConfig = &AttrUpdateConfig{} diff --git a/flytectl/cmd/config/subcommand/workflow/config_flags.go b/flytectl/cmd/config/subcommand/workflow/config_flags.go new file mode 100755 index 0000000000..f0f1064aff --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflow/config_flags.go @@ -0,0 +1,61 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package workflow + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), DefaultConfig.Version, "version of the workflow to be fetched.") + cmdFlags.BoolVar(&DefaultConfig.Latest, fmt.Sprintf("%v%v", prefix, "latest"), DefaultConfig.Latest, " flag to indicate to fetch the latest version, version flag will be ignored in this case") + cmdFlags.StringVar(&DefaultConfig.Filter.FieldSelector, fmt.Sprintf("%v%v", prefix, "filter.fieldSelector"), DefaultConfig.Filter.FieldSelector, "Specifies the Field selector") + cmdFlags.StringVar(&DefaultConfig.Filter.SortBy, fmt.Sprintf("%v%v", prefix, "filter.sortBy"), DefaultConfig.Filter.SortBy, "Specifies which field to sort results ") + cmdFlags.Int32Var(&DefaultConfig.Filter.Limit, fmt.Sprintf("%v%v", prefix, "filter.limit"), DefaultConfig.Filter.Limit, "Specifies the limit") + cmdFlags.BoolVar(&DefaultConfig.Filter.Asc, fmt.Sprintf("%v%v", prefix, "filter.asc"), DefaultConfig.Filter.Asc, "Specifies the sorting order. 
By default flytectl sort result in descending order") + cmdFlags.Int32Var(&DefaultConfig.Filter.Page, fmt.Sprintf("%v%v", prefix, "filter.page"), DefaultConfig.Filter.Page, "Specifies the page number, in case there are multiple pages of results") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/workflow/config_flags_test.go b/flytectl/cmd/config/subcommand/workflow/config_flags_test.go new file mode 100755 index 0000000000..46d13aeb87 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflow/config_flags_test.go @@ -0,0 +1,200 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package workflow + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_latest", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("latest", testValue) + if vBool, err := cmdFlags.GetBool("latest"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), 
&actual.Latest) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.fieldSelector", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.fieldSelector", testValue) + if vString, err := cmdFlags.GetString("filter.fieldSelector"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.FieldSelector) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.sortBy", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.sortBy", testValue) + if vString, err := cmdFlags.GetString("filter.sortBy"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Filter.SortBy) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.limit", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.limit", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.limit"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Limit) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.asc", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.asc", testValue) + if vBool, err := cmdFlags.GetBool("filter.asc"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Filter.Asc) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_filter.page", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("filter.page", testValue) + if vInt32, err := cmdFlags.GetInt32("filter.page"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.Filter.Page) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/workflow/workflow_config.go 
b/flytectl/cmd/config/subcommand/workflow/workflow_config.go new file mode 100644 index 0000000000..a731cd2f25 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflow/workflow_config.go @@ -0,0 +1,24 @@ +package workflow + +import ( + "github.com/flyteorg/flytectl/pkg/filters" +) + +//go:generate pflags Config --default-var DefaultConfig --bind-default-var + +var ( + wfDefaultFilter = filters.Filters{ + Limit: filters.DefaultLimit, + Page: 1, + } + DefaultConfig = &Config{ + Filter: wfDefaultFilter, + } +) + +// Config commandline configuration +type Config struct { + Version string `json:"version" pflag:",version of the workflow to be fetched."` + Latest bool `json:"latest" pflag:", flag to indicate to fetch the latest version, version flag will be ignored in this case"` + Filter filters.Filters `json:"filter" pflag:","` +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrdeleteconfig_flags.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrdeleteconfig_flags.go new file mode 100755 index 0000000000..566789244a --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrdeleteconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package workflowexecutionconfig + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrDeleteConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrDeleteConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrDeleteConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrDeleteConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrDeleteConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrDeleteConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultDelConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultDelConfig.AttrFile, "attribute file name to be used for delete attribute for the resource type.") + cmdFlags.BoolVar(&DefaultDelConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultDelConfig.DryRun, "execute command without making any modifications.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrdeleteconfig_flags_test.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrdeleteconfig_flags_test.go new file mode 100755 index 0000000000..8a6b1b94f4 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrdeleteconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package workflowexecutionconfig + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrDeleteConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrDeleteConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrDeleteConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrDeleteConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrDeleteConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrDeleteConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrDeleteConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrDeleteConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrDeleteConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(val, result)) +} + +func testDecodeRaw_AttrDeleteConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrDeleteConfig(vStringSlice, result)) +} + +func TestAttrDeleteConfig_GetPFlagSet(t *testing.T) { + val := AttrDeleteConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrDeleteConfig_SetFlags(t *testing.T) { + actual := AttrDeleteConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", 
testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrDeleteConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrfetchconfig_flags.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrfetchconfig_flags.go new file mode 100755 index 0000000000..7ea760f420 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrfetchconfig_flags.go @@ -0,0 +1,56 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package workflowexecutionconfig + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (AttrFetchConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrFetchConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrFetchConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrFetchConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg AttrFetchConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrFetchConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultFetchConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultFetchConfig.AttrFile, "attribute file name to be used for generating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultFetchConfig.Gen, fmt.Sprintf("%v%v", prefix, "gen"), DefaultFetchConfig.Gen, "generates an empty workflow execution config file with conformance to the api format.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrfetchconfig_flags_test.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrfetchconfig_flags_test.go new file mode 100755 index 0000000000..6d090a0fe4 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrfetchconfig_flags_test.go @@ -0,0 +1,130 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package workflowexecutionconfig + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrFetchConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrFetchConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrFetchConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookAttrFetchConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrFetchConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrFetchConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrFetchConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrFetchConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrFetchConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(val, result)) +} + +func testDecodeRaw_AttrFetchConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrFetchConfig(vStringSlice, result)) +} + +func TestAttrFetchConfig_GetPFlagSet(t *testing.T) { + val := AttrFetchConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + 
+func TestAttrFetchConfig_SetFlags(t *testing.T) { + actual := AttrFetchConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_gen", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("gen", testValue) + if vBool, err := cmdFlags.GetBool("gen"); err == nil { + testDecodeJson_AttrFetchConfig(t, fmt.Sprintf("%v", vBool), &actual.Gen) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrupdateconfig_flags.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrupdateconfig_flags.go new file mode 100755 index 0000000000..a8423cdb86 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrupdateconfig_flags.go @@ -0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package workflowexecutionconfig + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (AttrUpdateConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (AttrUpdateConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (AttrUpdateConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in AttrUpdateConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg AttrUpdateConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("AttrUpdateConfig", pflag.ExitOnError) + cmdFlags.StringVar(&DefaultUpdateConfig.AttrFile, fmt.Sprintf("%v%v", prefix, "attrFile"), DefaultUpdateConfig.AttrFile, "attribute file name to be used for updating attribute for the resource type.") + cmdFlags.BoolVar(&DefaultUpdateConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), DefaultUpdateConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&DefaultUpdateConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), DefaultUpdateConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrupdateconfig_flags_test.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrupdateconfig_flags_test.go new file mode 100755 index 0000000000..5c470ca066 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/attrupdateconfig_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package workflowexecutionconfig + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsAttrUpdateConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementAttrUpdateConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsAttrUpdateConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookAttrUpdateConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementAttrUpdateConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_AttrUpdateConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookAttrUpdateConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_AttrUpdateConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_AttrUpdateConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(val, result)) +} + +func testDecodeRaw_AttrUpdateConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_AttrUpdateConfig(vStringSlice, result)) +} + +func TestAttrUpdateConfig_GetPFlagSet(t *testing.T) { + val := AttrUpdateConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestAttrUpdateConfig_SetFlags(t *testing.T) { + actual := AttrUpdateConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_attrFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("attrFile", testValue) + if vString, err := cmdFlags.GetString("attrFile"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vString), &actual.AttrFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", 
testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_AttrUpdateConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/delete_config.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/delete_config.go new file mode 100644 index 0000000000..2ee094ab9e --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/delete_config.go @@ -0,0 +1,11 @@ +package workflowexecutionconfig + +//go:generate pflags AttrDeleteConfig --default-var DefaultDelConfig --bind-default-var + +// AttrDeleteConfig Matchable resource attributes configuration passed from command line +type AttrDeleteConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for delete attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` +} + +var DefaultDelConfig = &AttrDeleteConfig{} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/fetch_config.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/fetch_config.go new file mode 100644 index 0000000000..6a1c3e6988 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/fetch_config.go @@ -0,0 +1,10 @@ +package workflowexecutionconfig + +//go:generate pflags AttrFetchConfig --default-var DefaultFetchConfig --bind-default-var + +type AttrFetchConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for generating attribute for the resource type."` + Gen bool `json:"gen" 
pflag:",generates an empty workflow execution config file with conformance to the api format."` +} + +var DefaultFetchConfig = &AttrFetchConfig{} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/file_config.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/file_config.go new file mode 100644 index 0000000000..33ef8e1677 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/file_config.go @@ -0,0 +1,47 @@ +package workflowexecutionconfig + +import ( + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +// FileConfig shadow Config for WorkflowExecutionConfig. +// The shadow Config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. +// As the same structure is being used for both ProjectDomainAttribute/Workflowattribute +type FileConfig struct { + Project string `json:"project"` + Domain string `json:"domain"` + Workflow string `json:"workflow,omitempty"` + *admin.WorkflowExecutionConfig +} + +// Decorate decorator over WorkflowExecutionConfig. +func (t FileConfig) Decorate() *admin.MatchingAttributes { + return &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: t.WorkflowExecutionConfig, + }, + } +} + +// UnDecorate to uncover WorkflowExecutionConfig. 
+func (t *FileConfig) UnDecorate(matchingAttribute *admin.MatchingAttributes) { + if matchingAttribute == nil { + return + } + t.WorkflowExecutionConfig = matchingAttribute.GetWorkflowExecutionConfig() +} + +// GetProject from the WorkflowExecutionConfig +func (t FileConfig) GetProject() string { + return t.Project +} + +// GetDomain from the WorkflowExecutionConfig +func (t FileConfig) GetDomain() string { + return t.Domain +} + +// GetWorkflow from the WorkflowExecutionConfig +func (t FileConfig) GetWorkflow() string { + return t.Workflow +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/file_config_test.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/file_config_test.go new file mode 100644 index 0000000000..45454f4536 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/file_config_test.go @@ -0,0 +1,46 @@ +package workflowexecutionconfig + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" +) + +func TestFileConfig(t *testing.T) { + workflowExecutionConfigFileConfig := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ + MaxParallelism: 5, + }, + } + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: workflowExecutionConfigFileConfig.WorkflowExecutionConfig, + }, + } + t.Run("decorate", func(t *testing.T) { + assert.Equal(t, matchingAttr, workflowExecutionConfigFileConfig.Decorate()) + }) + + t.Run("decorate", func(t *testing.T) { + workflowExecutionConfigFileConfigNew := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + } + workflowExecutionConfigFileConfigNew.UnDecorate(matchingAttr) + assert.Equal(t, workflowExecutionConfigFileConfig, workflowExecutionConfigFileConfigNew) + }) + t.Run("get project domain workflow", func(t *testing.T) { + 
workflowExecutionConfigFileConfigNew := FileConfig{ + Project: "dummyProject", + Domain: "dummyDomain", + Workflow: "workflow", + } + assert.Equal(t, "dummyProject", workflowExecutionConfigFileConfigNew.GetProject()) + assert.Equal(t, "dummyDomain", workflowExecutionConfigFileConfigNew.GetDomain()) + assert.Equal(t, "workflow", workflowExecutionConfigFileConfigNew.GetWorkflow()) + }) +} diff --git a/flytectl/cmd/config/subcommand/workflowexecutionconfig/update_config.go b/flytectl/cmd/config/subcommand/workflowexecutionconfig/update_config.go new file mode 100644 index 0000000000..2b244000e2 --- /dev/null +++ b/flytectl/cmd/config/subcommand/workflowexecutionconfig/update_config.go @@ -0,0 +1,12 @@ +package workflowexecutionconfig + +//go:generate pflags AttrUpdateConfig --default-var DefaultUpdateConfig --bind-default-var + +// AttrUpdateConfig Matchable resource attributes configuration passed from command line +type AttrUpdateConfig struct { + AttrFile string `json:"attrFile" pflag:",attribute file name to be used for updating attribute for the resource type."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} + +var DefaultUpdateConfig = &AttrUpdateConfig{} diff --git a/flytectl/cmd/configuration/configuration.go b/flytectl/cmd/configuration/configuration.go new file mode 100644 index 0000000000..010416e36d --- /dev/null +++ b/flytectl/cmd/configuration/configuration.go @@ -0,0 +1,160 @@ +package configuration + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/flyteorg/flytectl/pkg/util" + + "github.com/flyteorg/flytectl/pkg/configutil" + + "github.com/flyteorg/flyte/flytestdlib/config/viper" + initConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/config" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" + 
"github.com/go-ozzo/ozzo-validation/v4/is" + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using Sphinx. +const ( + initCmdShort = `Generates a Flytectl config file in the user's home directory.` + initCmdLong = `Creates a Flytectl config file in Flyte directory i.e ~/.flyte. + +Generate Sandbox config: +:: + + flytectl config init + +Flyte Sandbox is a fully standalone minimal environment for running Flyte. +Read more about the Sandbox deployment :ref:` + "`here `" + `. + +Generate remote cluster config: +:: + + flytectl config init --host=flyte.myexample.com + +By default, the connection is secure. +Read more about remote deployment :ref:` + "`here `" + `. + +Generate remote cluster config with insecure connection: +:: + + flytectl config init --host=flyte.myexample.com --insecure + + Generate remote cluster config with separate console endpoint: + :: + + flytectl config init --host=flyte.myexample.com --console=console.myexample.com + +Generate Flytectl config with a storage provider: +:: + + flytectl config init --host=flyte.myexample.com --storage +` +) + +var endpointPrefix = [3]string{"dns:///", "http://", "https://"} + +// CreateConfigCommand will return configuration command +func CreateConfigCommand() *cobra.Command { + configCmd := viper.GetConfigCommand() + + getResourcesFuncs := map[string]cmdcore.CommandEntry{ + "init": {CmdFunc: configInitFunc, Aliases: []string{""}, ProjectDomainNotRequired: true, + Short: initCmdShort, + Long: initCmdLong, PFlagProvider: initConfig.DefaultConfig}, + } + + configCmd.Flags().BoolVar(&initConfig.DefaultConfig.Force, "force", false, "Force to overwrite the default config file without confirmation") + + cmdcore.AddCommands(configCmd, getResourcesFuncs) + return configCmd +} + +func configInitFunc(ctx context.Context, args []string, cmdCtx cmdcore.CommandContext) error { + return initFlytectlConfig(os.Stdin) +} + +func initFlytectlConfig(reader io.Reader) error { + + if 
err := util.SetupFlyteDir(); err != nil { + return err + } + + templateValues := configutil.ConfigTemplateSpec{ + Host: "dns:///localhost:30080", + Insecure: true, + } + templateStr := configutil.GetTemplate() + + if len(initConfig.DefaultConfig.Host) > 0 { + trimHost := trimEndpoint(initConfig.DefaultConfig.Host) + if !validateEndpointName(trimHost) { + return fmt.Errorf("%s invalid, please use a valid admin endpoint", trimHost) + } + templateValues.Host = fmt.Sprintf("dns:///%s", trimHost) + templateValues.Insecure = initConfig.DefaultConfig.Insecure + } + if len(initConfig.DefaultConfig.Console) > 0 { + trimConsole := trimEndpoint(initConfig.DefaultConfig.Console) + if !validateEndpointName(trimConsole) { + return fmt.Errorf("%s invalid, please use a valid console endpoint", trimConsole) + } + templateValues.Console = initConfig.DefaultConfig.Console + } + var _err error + if _, err := os.Stat(configutil.ConfigFile); os.IsNotExist(err) { + _err = configutil.SetupConfig(configutil.ConfigFile, templateStr, templateValues) + } else { + if initConfig.DefaultConfig.Force || cmdUtil.AskForConfirmation(fmt.Sprintf("This action will overwrite an existing config file at [%s]. 
Do you want to continue?", configutil.ConfigFile), reader) { + if err := os.Remove(configutil.ConfigFile); err != nil { + return err + } + _err = configutil.SetupConfig(configutil.ConfigFile, templateStr, templateValues) + } + } + if _err != nil { + return _err + } + fmt.Printf("Init flytectl config file at [%s]", configutil.ConfigFile) + return nil +} + +func trimEndpoint(hostname string) string { + for _, prefix := range endpointPrefix { + hostname = strings.TrimPrefix(hostname, prefix) + } + return hostname + +} + +func validateEndpointName(endPoint string) bool { + var validate = false + if endPoint == "localhost" { + return true + } + if err := is.URL.Validate(endPoint); err != nil { + return false + } + endPointParts := strings.Split(endPoint, ":") + if len(endPointParts) <= 2 && len(endPointParts) > 0 { + if err := is.DNSName.Validate(endPointParts[0]); !errors.Is(err, is.ErrDNSName) && err == nil { + validate = true + } + if err := is.IP.Validate(endPointParts[0]); !errors.Is(err, is.ErrIP) && err == nil { + validate = true + } + if len(endPointParts) == 2 { + if err := is.Port.Validate(endPointParts[1]); err != nil { + return false + } + } + } + + return validate +} diff --git a/flytectl/cmd/configuration/configuration_test.go b/flytectl/cmd/configuration/configuration_test.go new file mode 100644 index 0000000000..f48fe84e2f --- /dev/null +++ b/flytectl/cmd/configuration/configuration_test.go @@ -0,0 +1,101 @@ +package configuration + +import ( + "context" + "fmt" + "io" + "os" + "sort" + "strings" + "testing" + + admin2 "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + + "github.com/flyteorg/flytectl/pkg/configutil" + + initConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/util" + + "github.com/stretchr/testify/assert" +) + +func TestCreateInitCommand(t *testing.T) { + configCmd := CreateConfigCommand() + assert.Equal(t, configCmd.Use, "config") + 
assert.Equal(t, configCmd.Short, "Runs various config commands, look at the help of this command to get a list of available commands..") + fmt.Println(configCmd.Commands()) + assert.Equal(t, 4, len(configCmd.Commands())) + cmdNouns := configCmd.Commands() + // Sort by Use value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + + assert.Equal(t, "discover", cmdNouns[0].Use) + assert.Equal(t, "Searches for a config in one of the default search paths.", cmdNouns[0].Short) + assert.Equal(t, "docs", cmdNouns[1].Use) + assert.Equal(t, "Generate configuration documentation in rst format", cmdNouns[1].Short) + + assert.Equal(t, "init", cmdNouns[2].Use) + assert.Equal(t, initCmdShort, cmdNouns[2].Short) + assert.Equal(t, "validate", cmdNouns[3].Use) + assert.Equal(t, "Validates the loaded config.", cmdNouns[3].Short) +} + +func TestSetupConfigFunc(t *testing.T) { + var yes = strings.NewReader("Yes") + var no = strings.NewReader("No") + var empty = strings.NewReader("") + mockOutStream := new(io.Writer) + ctx := context.Background() + _ = os.Remove(configutil.FlytectlConfig) + + _ = util.SetupFlyteDir() + + mockClient := admin2.InitializeMockClientset() + cmdCtx := cmdCore.NewCommandContext(mockClient, *mockOutStream) + err := configInitFunc(ctx, []string{}, cmdCtx) + initConfig.DefaultConfig.Host = "" + assert.Nil(t, err) + + initConfig.DefaultConfig.Force = false + assert.Nil(t, initFlytectlConfig(yes)) + assert.Nil(t, initFlytectlConfig(no)) + + initConfig.DefaultConfig.Force = true + assert.Nil(t, initFlytectlConfig(empty)) + + initConfig.DefaultConfig.Host = "flyte.org" + assert.Nil(t, initFlytectlConfig(no)) + initConfig.DefaultConfig.Host = "localhost:30081" + assert.Nil(t, initFlytectlConfig(no)) + assert.Nil(t, initFlytectlConfig(yes)) +} + +func TestTrimFunc(t *testing.T) { + assert.Equal(t, trimEndpoint("dns:///localhost"), "localhost") + assert.Equal(t, trimEndpoint("http://localhost"), "localhost") + assert.Equal(t, 
trimEndpoint("https://localhost"), "localhost") +} + +func TestValidateEndpointName(t *testing.T) { + assert.Equal(t, true, validateEndpointName("8093405779.ap-northeast-2.elb.amazonaws.com:81")) + assert.Equal(t, true, validateEndpointName("8093405779.ap-northeast-2.elb.amazonaws.com")) + assert.Equal(t, false, validateEndpointName("8093405779.ap-northeast-2.elb.amazonaws.com:81/console")) + assert.Equal(t, true, validateEndpointName("localhost")) + assert.Equal(t, true, validateEndpointName("127.0.0.1")) + assert.Equal(t, true, validateEndpointName("127.0.0.1:30086")) + assert.Equal(t, true, validateEndpointName("112.11.1.1")) + assert.Equal(t, true, validateEndpointName("112.11.1.1:8080")) + assert.Equal(t, false, validateEndpointName("112.11.1.1:8080/console")) + assert.Equal(t, false, validateEndpointName("flyte")) +} + +func TestForceFlagInCreateConfigCommand(t *testing.T) { + cmd := CreateConfigCommand() + assert.False(t, initConfig.DefaultConfig.Force) + err := cmd.Flags().Parse([]string{"--force"}) + assert.Nil(t, err) + assert.True(t, initConfig.DefaultConfig.Force) +} diff --git a/flytectl/cmd/core/cmd.go b/flytectl/cmd/core/cmd.go new file mode 100644 index 0000000000..2d00adc04e --- /dev/null +++ b/flytectl/cmd/core/cmd.go @@ -0,0 +1,98 @@ +package cmdcore + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/pkg/pkce" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type PFlagProvider interface { + GetPFlagSet(prefix string) *pflag.FlagSet +} + +type CommandEntry struct { + ProjectDomainNotRequired bool + CmdFunc CommandFunc + Aliases []string + Short string + Long string + PFlagProvider PFlagProvider + DisableFlyteClient bool +} + +func AddCommands(rootCmd *cobra.Command, cmdFuncs map[string]CommandEntry) { + for resource, cmdEntry := 
range cmdFuncs { + cmd := &cobra.Command{ + Use: resource, + Short: cmdEntry.Short, + Long: cmdEntry.Long, + Aliases: cmdEntry.Aliases, + RunE: generateCommandFunc(cmdEntry), + SilenceUsage: true, + } + + if cmdEntry.PFlagProvider != nil { + cmd.Flags().AddFlagSet(cmdEntry.PFlagProvider.GetPFlagSet("")) + } + + rootCmd.AddCommand(cmd) + } +} + +func generateCommandFunc(cmdEntry CommandEntry) func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + + if !cmdEntry.ProjectDomainNotRequired { + if config.GetConfig().Project == "" { + return fmt.Errorf("project and domain are required parameters") + } + if config.GetConfig().Domain == "" { + return fmt.Errorf("project and domain are required parameters") + } + } + if _, err := config.GetConfig().OutputFormat(); err != nil { + return err + } + + adminCfg := admin.GetConfig(ctx) + if len(adminCfg.Endpoint.String()) == 0 { + return cmdEntry.CmdFunc(ctx, args, CommandContext{}) + } + + cmdCtx := NewCommandContextNoClient(cmd.OutOrStdout()) + if !cmdEntry.DisableFlyteClient { + clientSet, err := admin.ClientSetBuilder().WithConfig(admin.GetConfig(ctx)). 
+ WithTokenCache(pkce.TokenCacheKeyringProvider{ + ServiceUser: fmt.Sprintf("%s:%s", adminCfg.Endpoint.String(), pkce.KeyRingServiceUser), + ServiceName: pkce.KeyRingServiceName, + }).Build(ctx) + if err != nil { + return err + } + cmdCtx = NewCommandContext(clientSet, cmd.OutOrStdout()) + } + + err := cmdEntry.CmdFunc(ctx, args, cmdCtx) + if err != nil { + if s, ok := status.FromError(err); ok { + if s.Code() == codes.Unavailable || s.Code() == codes.Unauthenticated || s.Code() == codes.Unknown { + return errors.WithMessage(err, + fmt.Sprintf("Connection Info: [Endpoint: %s, InsecureConnection?: %v, AuthMode: %v]", adminCfg.Endpoint.String(), adminCfg.UseInsecureConnection, adminCfg.AuthType)) + } + } + return err + } + return nil + } +} diff --git a/flytectl/cmd/core/cmd_ctx.go b/flytectl/cmd/core/cmd_ctx.go new file mode 100644 index 0000000000..d492c53c4a --- /dev/null +++ b/flytectl/cmd/core/cmd_ctx.go @@ -0,0 +1,82 @@ +package cmdcore + +import ( + "io" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" + "github.com/flyteorg/flytectl/pkg/ext" +) + +type CommandContext struct { + clientSet *admin.Clientset + adminClientFetcherExt ext.AdminFetcherExtInterface + adminClientUpdateExt ext.AdminUpdaterExtInterface + adminClientDeleteExt ext.AdminDeleterExtInterface + in io.Reader + out io.Writer +} + +// NewCommandContextNoClient returns a new commandContext +func NewCommandContextNoClient(out io.Writer) CommandContext { + return NewCommandContext(nil, out) +} + +func NewCommandContext(clientSet *admin.Clientset, out io.Writer) CommandContext { + var adminClient service.AdminServiceClient + if clientSet != nil { + adminClient = clientSet.AdminClient() + } + return NewCommandContextWithExt( + clientSet, + &ext.AdminFetcherExtClient{AdminClient: adminClient}, + &ext.AdminUpdaterExtClient{AdminClient: adminClient}, + &ext.AdminDeleterExtClient{AdminClient: adminClient}, + out, + ) +} + +// 
NewCommandContextWithExt construct command context with injected extensions. Helps in injecting mocked ones for testing. +func NewCommandContextWithExt( + clientSet *admin.Clientset, + fetcher ext.AdminFetcherExtInterface, + updater ext.AdminUpdaterExtInterface, + deleter ext.AdminDeleterExtInterface, + out io.Writer) CommandContext { + return CommandContext{ + clientSet: clientSet, + out: out, + adminClientFetcherExt: fetcher, + adminClientUpdateExt: updater, + adminClientDeleteExt: deleter, + } +} + +func (c CommandContext) AdminClient() service.AdminServiceClient { + return c.clientSet.AdminClient() +} + +func (c CommandContext) ClientSet() *admin.Clientset { + return c.clientSet +} + +func (c CommandContext) OutputPipe() io.Writer { + return c.out +} + +func (c CommandContext) InputPipe() io.Reader { + return c.in +} + +func (c CommandContext) AdminFetcherExt() ext.AdminFetcherExtInterface { + return c.adminClientFetcherExt +} + +func (c CommandContext) AdminUpdaterExt() ext.AdminUpdaterExtInterface { + return c.adminClientUpdateExt +} + +func (c CommandContext) AdminDeleterExt() ext.AdminDeleterExtInterface { + return c.adminClientDeleteExt +} diff --git a/flytectl/cmd/core/cmd_test.go b/flytectl/cmd/core/cmd_test.go new file mode 100644 index 0000000000..13737b4a61 --- /dev/null +++ b/flytectl/cmd/core/cmd_test.go @@ -0,0 +1,38 @@ +package cmdcore + +import ( + "context" + "net/url" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + "github.com/flyteorg/flyte/flytestdlib/config" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" +) + +func testCommandFunc(ctx context.Context, args []string, cmdCtx CommandContext) error { + return nil +} + +func TestGenerateCommandFunc(t *testing.T) { + t.Run("dummy host name", func(t *testing.T) { + adminCfg := admin.GetConfig(context.Background()) + adminCfg.Endpoint = config.URL{URL: url.URL{Host: "dummyHost"}} + adminCfg.AuthType = admin.AuthTypePkce + rootCmd := &cobra.Command{} + 
cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true} + fn := generateCommandFunc(cmdEntry) + assert.Nil(t, fn(rootCmd, []string{})) + }) + + t.Run("host is not configured", func(t *testing.T) { + adminCfg := admin.GetConfig(context.Background()) + adminCfg.Endpoint = config.URL{URL: url.URL{Host: ""}} + rootCmd := &cobra.Command{} + cmdEntry := CommandEntry{CmdFunc: testCommandFunc, ProjectDomainNotRequired: true} + fn := generateCommandFunc(cmdEntry) + assert.Nil(t, fn(rootCmd, []string{})) + }) +} diff --git a/flytectl/cmd/core/types.go b/flytectl/cmd/core/types.go new file mode 100644 index 0000000000..dbb0b9d42a --- /dev/null +++ b/flytectl/cmd/core/types.go @@ -0,0 +1,5 @@ +package cmdcore + +import "context" + +type CommandFunc func(ctx context.Context, args []string, cmdCtx CommandContext) error diff --git a/flytectl/cmd/create/create.go b/flytectl/cmd/create/create.go new file mode 100644 index 0000000000..81b029c5bf --- /dev/null +++ b/flytectl/cmd/create/create.go @@ -0,0 +1,36 @@ +package create + +import ( + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using Sphinx. 
+const ( + createCmdShort = `Creates various Flyte resources such as tasks, workflows, launch plans, executions, and projects.` + createCmdLong = ` +Create Flyte resource; if a project: +:: + + flytectl create project --file project.yaml +` +) + +// RemoteCreateCommand will return create Flyte resource commands +func RemoteCreateCommand() *cobra.Command { + createCmd := &cobra.Command{ + Use: "create", + Short: createCmdShort, + Long: createCmdLong, + } + createResourcesFuncs := map[string]cmdcore.CommandEntry{ + "project": {CmdFunc: createProjectsCommand, Aliases: []string{"projects"}, ProjectDomainNotRequired: true, PFlagProvider: project.DefaultProjectConfig, Short: projectShort, + Long: projectLong}, + "execution": {CmdFunc: createExecutionCommand, Aliases: []string{"executions"}, ProjectDomainNotRequired: false, PFlagProvider: executionConfig, Short: executionShort, + Long: executionLong}, + } + cmdcore.AddCommands(createCmd, createResourcesFuncs) + return createCmd +} diff --git a/flytectl/cmd/create/create_test.go b/flytectl/cmd/create/create_test.go new file mode 100644 index 0000000000..bfa6816e91 --- /dev/null +++ b/flytectl/cmd/create/create_test.go @@ -0,0 +1,32 @@ +package create + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flytectl/cmd/testutils" +) + +const testDataFolder = "../testdata/" + +var setup = testutils.Setup + +func TestCreateCommand(t *testing.T) { + createCommand := RemoteCreateCommand() + assert.Equal(t, createCommand.Use, "create") + assert.Equal(t, createCommand.Short, "Creates various Flyte resources such as tasks, workflows, launch plans, executions, and projects.") + assert.Equal(t, len(createCommand.Commands()), 2) + cmdNouns := createCommand.Commands() + // Sort by Use value. 
+ sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + assert.Equal(t, cmdNouns[0].Use, "execution") + assert.Equal(t, cmdNouns[0].Aliases, []string{"executions"}) + assert.Equal(t, cmdNouns[0].Short, executionShort) + assert.Equal(t, cmdNouns[1].Use, "project") + assert.Equal(t, cmdNouns[1].Aliases, []string{"projects"}) + assert.Equal(t, cmdNouns[1].Short, "Creates project resources.") +} diff --git a/flytectl/cmd/create/execution.go b/flytectl/cmd/create/execution.go new file mode 100644 index 0000000000..7fc62f2045 --- /dev/null +++ b/flytectl/cmd/create/execution.go @@ -0,0 +1,246 @@ +package create + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flytestdlib/logger" + + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + executionShort = "Creates execution resources." + executionLong = ` +Create execution resources for a given workflow or task in a project and domain. + +There are three steps to generate an execution, as outlined below: + +1. Generate the execution spec file using the :ref:` + "`get task `" + ` command. +:: + + flytectl get tasks -d development -p flytesnacks core.control_flow.merge_sort.merge --version v2 --execFile execution_spec.yaml + +The generated file would look similar to the following: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + sorted_list1: + - 0 + sorted_list2: + - 0 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: "v2" + +2. [Optional] Update the inputs for the execution, if needed. +The generated spec file can be modified to change the input values, as shown below: + +.. 
code-block:: yaml + + iamRoleARN: 'arn:aws:iam::12345678:role/defaultrole' + inputs: + sorted_list1: + - 2 + - 4 + - 6 + sorted_list2: + - 1 + - 3 + - 5 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: "v2" + +3. [Optional] Update the envs for the execution, if needed. +The generated spec file can be modified to change the envs values, as shown below: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + sorted_list1: + - 0 + sorted_list2: + - 0 + envs: + foo: bar + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: "v2" + +4. Run the execution by passing the generated YAML file. +The file can then be passed through the command line. +It is worth noting that the source's and target's project and domain can be different. +:: + + flytectl create execution --execFile execution_spec.yaml -p flytesnacks -d staging --targetProject flytesnacks + +5. To relaunch an execution, pass the current execution ID as follows: + +:: + + flytectl create execution --relaunch ffb31066a0f8b4d52b77 -p flytesnacks -d development + +6. To recover an execution, i.e., recreate it from the last known failure point for previously-run workflow execution, run: + +:: + + flytectl create execution --recover ffb31066a0f8b4d52b77 -p flytesnacks -d development + +See :ref:` + "`ref_flyteidl.admin.ExecutionRecoverRequest`" + ` for more details. + +7. You can create executions idempotently by naming them. This is also a way to *name* an execution for discovery. Note, +an execution id has to be unique within a project domain. So if the *name* matches an existing execution, an "already exists" exception +will be raised. + +:: + + flytectl create execution --recover ffb31066a0f8b4d52b77 -p flytesnacks -d development custom_name + +8. Generic/Struct/Dataclass/JSON types are supported for execution in a similar manner.
+The following is an example of how generic data can be specified while creating the execution. + +:: + + flytectl get task -d development -p flytesnacks core.type_system.custom_objects.add --execFile adddatanum.yaml + +The generated file would look similar to this. Here, empty values have been dumped for generic data types 'x' and 'y'. +:: + + iamRoleARN: "" + inputs: + "x": {} + "y": {} + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.type_system.custom_objects.add + version: v3 + +9. Modified file with struct data populated for 'x' and 'y' parameters for the task "core.type_system.custom_objects.add": + +:: + + iamRoleARN: "arn:aws:iam::123456789:role/dummy" + inputs: + "x": + "x": 2 + "y": ydatafory + "z": + 1 : "foo" + 2 : "bar" + "y": + "x": 3 + "y": ydataforx + "z": + 3 : "buzz" + 4 : "lightyear" + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.type_system.custom_objects.add + version: v3 + +10. If you have configured a plugin that implements github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces/WorkflowExecutor + that supports cluster pools, then when creating a new execution, you can assign it to a specific cluster pool: + +:: + + flytectl create execution --execFile execution_spec.yaml -p flytesnacks -d development --clusterPool my-gpu-cluster +` +) + +//go:generate pflags ExecutionConfig --default-var executionConfig --bind-default-var + +// ExecutionConfig hold configuration for create execution flags and configuration of the actual task or workflow to be launched. +type ExecutionConfig struct { + // pflag section + ExecFile string `json:"execFile,omitempty" pflag:",file for the execution params. If not specified defaults to <_name>.execution_spec.yaml"` + TargetDomain string `json:"targetDomain" pflag:",project where execution needs to be created. If not specified configured domain would be used."` + TargetProject string `json:"targetProject" pflag:",project where execution needs to be created. 
If not specified configured project would be used."` + KubeServiceAcct string `json:"kubeServiceAcct" pflag:",kubernetes service account AuthRole for launching execution."` + IamRoleARN string `json:"iamRoleARN" pflag:",iam role ARN AuthRole for launching execution."` + Relaunch string `json:"relaunch" pflag:",execution id to be relaunched."` + Recover string `json:"recover" pflag:",execution id to be recreated from the last known failure point."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Version string `json:"version" pflag:",specify version of execution workflow/task."` + ClusterPool string `json:"clusterPool" pflag:",specify which cluster pool to assign execution to."` + OverwriteCache bool `json:"overwriteCache" pflag:",skip cached results when performing execution,causing all outputs to be re-calculated and stored data to be overwritten. Does not work for recovered executions."` + // Non plfag section is read from the execution config generated by get task/launch plan + Workflow string `json:"workflow,omitempty"` + Task string `json:"task,omitempty"` + Inputs map[string]interface{} `json:"inputs" pflag:"-"` + Envs map[string]string `json:"envs" pflag:"-"` +} + +type ExecutionType int + +const ( + Task ExecutionType = iota + Workflow + Relaunch + Recover +) + +type ExecutionParams struct { + name string + execType ExecutionType +} + +var executionConfig = &ExecutionConfig{} + +func createExecutionCommand(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + sourceProject := config.GetConfig().Project + sourceDomain := config.GetConfig().Domain + + var targetExecName string + if len(args) > 0 { + targetExecName = args[0] + } + + execParams, err := readConfigAndValidate(config.GetConfig().Project, config.GetConfig().Domain) + if err != nil { + return err + } + var executionRequest *admin.ExecutionCreateRequest + switch execParams.execType { + case Relaunch: + return relaunchExecution(ctx, 
execParams.name, sourceProject, sourceDomain, cmdCtx, executionConfig, targetExecName) + case Recover: + return recoverExecution(ctx, execParams.name, sourceProject, sourceDomain, cmdCtx, executionConfig, targetExecName) + case Task: + executionRequest, err = createExecutionRequestForTask(ctx, execParams.name, sourceProject, sourceDomain, cmdCtx, executionConfig, targetExecName) + if err != nil { + return err + } + case Workflow: + executionRequest, err = createExecutionRequestForWorkflow(ctx, execParams.name, sourceProject, sourceDomain, cmdCtx, executionConfig, targetExecName) + if err != nil { + return err + } + default: + return fmt.Errorf("invalid execution type %v", execParams.execType) + } + + if executionConfig.DryRun { + logger.Debugf(ctx, "skipping CreateExecution request (DryRun)") + } else { + exec, _err := cmdCtx.AdminClient().CreateExecution(ctx, executionRequest) + if _err != nil { + return _err + } + fmt.Printf("execution identifier %v\n", exec.Id) + } + return nil +} diff --git a/flytectl/cmd/create/execution_test.go b/flytectl/cmd/create/execution_test.go new file mode 100644 index 0000000000..ee5dc268ef --- /dev/null +++ b/flytectl/cmd/create/execution_test.go @@ -0,0 +1,336 @@ +package create + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/cmd/testutils" +) + +type createSuite struct { + suite.Suite + testutils.TestStruct + originalExecConfig ExecutionConfig +} + +func (s *createSuite) SetupTest() { + s.TestStruct = setup() + + // TODO: migrate to new command context from testutils + s.CmdCtx = cmdCore.NewCommandContext(s.MockClient, 
s.MockOutStream) + s.originalExecConfig = *executionConfig +} + +func (s *createSuite) TearDownTest() { + orig := s.originalExecConfig + executionConfig = &orig + s.MockAdminClient.AssertExpectations(s.T()) +} + +func (s *createSuite) onGetTask() { + sortedListLiteralType := core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + } + variableMap := map[string]*core.Variable{ + "sorted_list1": &sortedListLiteralType, + "sorted_list2": &sortedListLiteralType, + } + + task1 := &admin.Task{ + Id: &core.Identifier{ + Name: "task1", + Version: "v2", + }, + Closure: &admin.TaskClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledTask: &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + } + s.MockAdminClient.OnGetTaskMatch(s.Ctx, mock.Anything).Return(task1, nil) +} + +func (s *createSuite) onGetLaunchPlan() { + parameterMap := map[string]*core.Parameter{ + "numbers": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + }, + }, + "numbers_count": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + "run_local_at_count": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + Behavior: &core.Parameter_Default{ + Default: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 10, + }, + }, + }, + }, + }, + }, + }, + }, + } + 
launchPlan1 := &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: "core.control_flow.merge_sort.merge_sort", + Version: "v3", + }, + Spec: &admin.LaunchPlanSpec{ + DefaultInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + Closure: &admin.LaunchPlanClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + ExpectedInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + } + objectGetRequest := &admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Name: "core.control_flow.merge_sort.merge_sort", + Version: "v3", + }, + } + s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan1, nil).Once() +} + +func (s *createSuite) Test_CreateTaskExecution() { + s.onGetTask() + executionCreateResponseTask := &admin.ExecutionCreateResponse{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "flytesnacks", + Domain: "development", + Name: "ff513c0e44b5b4a35aa5", + }, + } + expected := &admin.ExecutionCreateRequest{ + Project: "dummyProject", + Domain: "dummyDomain", + Spec: &admin.ExecutionSpec{ + LaunchPlan: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "dummyProject", + Domain: "dummyDomain", + Name: "task1", + Version: "v2", + }, + Metadata: &admin.ExecutionMetadata{Mode: admin.ExecutionMetadata_MANUAL, Principal: "sdk", Nesting: 0}, + AuthRole: &admin.AuthRole{ + KubernetesServiceAccount: executionConfig.KubeServiceAcct, + AssumableIamRole: "iamRoleARN", + }, + SecurityContext: &core.SecurityContext{ + RunAs: &core.Identity{ + K8SServiceAccount: executionConfig.KubeServiceAcct, + IamRole: "iamRoleARN", + }, + }, + ClusterAssignment: &admin.ClusterAssignment{ClusterPoolName: "gpu"}, + Envs: &admin.Envs{}, + }, + } + s.MockAdminClient. + OnCreateExecutionMatch(s.Ctx, mock.Anything). 
+ Run(func(args mock.Arguments) { + actual := args.Get(1).(*admin.ExecutionCreateRequest) + actual.Name = "" + actual.Inputs = nil + s.True(proto.Equal(expected, actual), actual.String()) + }). + Return(executionCreateResponseTask, nil). + Once() + executionConfig.ExecFile = testDataFolder + "task_execution_spec_with_iamrole.yaml" + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.NoError(err) + s.TearDownAndVerify(s.T(), `execution identifier project:"flytesnacks" domain:"development" name:"ff513c0e44b5b4a35aa5" `) +} + +func (s *createSuite) Test_CreateTaskExecution_GetTaskError() { + expected := fmt.Errorf("error") + s.MockAdminClient.OnGetTaskMatch(s.Ctx, mock.Anything).Return(nil, expected).Once() + executionConfig.ExecFile = testDataFolder + "task_execution_spec.yaml" + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.Equal(expected, err) +} + +func (s *createSuite) Test_CreateTaskExecution_CreateExecutionError() { + s.onGetTask() + s.MockAdminClient. + OnCreateExecutionMatch(s.Ctx, mock.Anything). + Return(nil, fmt.Errorf("error launching task")). 
+ Once() + executionConfig.ExecFile = testDataFolder + "task_execution_spec.yaml" + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.EqualError(err, "error launching task") +} + +func (s *createSuite) Test_CreateLaunchPlanExecution() { + executionCreateResponseLP := &admin.ExecutionCreateResponse{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "flytesnacks", + Domain: "development", + Name: "f652ea3596e7f4d80a0e", + }, + } + s.onGetLaunchPlan() + s.MockAdminClient.OnCreateExecutionMatch(s.Ctx, mock.Anything).Return(executionCreateResponseLP, nil) + executionConfig.ExecFile = testDataFolder + "launchplan_execution_spec.yaml" + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.NoError(err) + s.TearDownAndVerify(s.T(), `execution identifier project:"flytesnacks" domain:"development" name:"f652ea3596e7f4d80a0e" `) +} + +func (s *createSuite) Test_CreateLaunchPlan_GetLaunchPlanError() { + expected := fmt.Errorf("error") + s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, mock.Anything).Return(nil, expected).Once() + executionConfig.ExecFile = testDataFolder + "launchplan_execution_spec.yaml" + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.Equal(expected, err) +} + +func (s *createSuite) Test_CreateRelaunchExecution() { + relaunchExecResponse := &admin.ExecutionCreateResponse{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "flytesnacks", + Domain: "development", + Name: "f652ea3596e7f4d80a0e", + }, + } + executionConfig.Relaunch = relaunchExecResponse.Id.Name + relaunchRequest := &admin.ExecutionRelaunchRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Name: executionConfig.Relaunch, + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }, + } + s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(relaunchExecResponse, nil).Once() + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.NoError(err) + s.TearDownAndVerify(s.T(), `execution identifier project:"flytesnacks" 
domain:"development" name:"f652ea3596e7f4d80a0e" `) +} + +func (s *createSuite) Test_CreateRecoverExecution() { + originalExecutionName := "abc123" + recoverExecResponse := &admin.ExecutionCreateResponse{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "flytesnacks", + Domain: "development", + Name: "f652ea3596e7f4d80a0e", + }, + } + executionConfig.Recover = originalExecutionName + recoverRequest := &admin.ExecutionRecoverRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Name: originalExecutionName, + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }, + } + s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(recoverExecResponse, nil).Once() + + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + + s.NoError(err) + s.TearDownAndVerify(s.T(), `execution identifier project:"flytesnacks" domain:"development" name:"f652ea3596e7f4d80a0e"`) +} + +func (s *createSuite) TestCreateExecutionFuncInvalid() { + executionConfig.Relaunch = "" + executionConfig.ExecFile = "" + err := createExecutionCommand(s.Ctx, nil, s.CmdCtx) + s.EqualError(err, "executionConfig, relaunch and recover can't be empty. 
Run the flytectl get task/launchplan to generate the config") + + executionConfig.ExecFile = "Invalid-file" + err = createExecutionCommand(s.Ctx, nil, s.CmdCtx) + s.EqualError(err, fmt.Sprintf("unable to read from %v yaml file", executionConfig.ExecFile)) + + executionConfig.ExecFile = testDataFolder + "invalid_execution_spec.yaml" + err = createExecutionCommand(s.Ctx, nil, s.CmdCtx) + s.EqualError(err, "either task or workflow name should be specified to launch an execution") +} + +func (s *createSuite) Test_CreateTaskExecution_DryRun() { + s.onGetTask() + executionConfig.DryRun = true + executionConfig.ExecFile = testDataFolder + "task_execution_spec_with_iamrole.yaml" + + err := createExecutionCommand(s.Ctx, []string{"target"}, s.CmdCtx) + + s.NoError(err) +} + +func TestCreateSuite(t *testing.T) { + suite.Run(t, &createSuite{originalExecConfig: *executionConfig}) +} diff --git a/flytectl/cmd/create/execution_util.go b/flytectl/cmd/create/execution_util.go new file mode 100644 index 0000000000..63b6fe0871 --- /dev/null +++ b/flytectl/cmd/create/execution_util.go @@ -0,0 +1,265 @@ +package create + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/google/uuid" + "sigs.k8s.io/yaml" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + cmdGet "github.com/flyteorg/flytectl/cmd/get" +) + +func createExecutionRequestForWorkflow(ctx context.Context, workflowName, project, domain string, + cmdCtx cmdCore.CommandContext, executionConfig *ExecutionConfig, targetExecName string) (*admin.ExecutionCreateRequest, error) { + // Fetch the launch plan + lp, err := cmdCtx.AdminFetcherExt().FetchLPVersion(ctx, workflowName, executionConfig.Version, project, domain) + if err != nil { + return nil, err + } + + // Create workflow params literal map + workflowParams := 
cmdGet.WorkflowParams(lp) + paramLiterals, err := MakeLiteralForParams(executionConfig.Inputs, workflowParams) + if err != nil { + return nil, err + } + + var inputs = &core.LiteralMap{ + Literals: paramLiterals, + } + + envs := makeEnvs(executionConfig) + + // Set both deprecated field and new field for security identity passing + var securityContext *core.SecurityContext + var authRole *admin.AuthRole + + if len(executionConfig.KubeServiceAcct) > 0 || len(executionConfig.IamRoleARN) > 0 { + authRole = &admin.AuthRole{ + KubernetesServiceAccount: executionConfig.KubeServiceAcct, + AssumableIamRole: executionConfig.IamRoleARN, + } + securityContext = &core.SecurityContext{ + RunAs: &core.Identity{ + K8SServiceAccount: executionConfig.KubeServiceAcct, + IamRole: executionConfig.IamRoleARN, + }, + } + } + + return createExecutionRequest(lp.Id, inputs, envs, securityContext, authRole, targetExecName), nil +} + +func createExecutionRequestForTask(ctx context.Context, taskName string, project string, domain string, + cmdCtx cmdCore.CommandContext, executionConfig *ExecutionConfig, targetExecName string) (*admin.ExecutionCreateRequest, error) { + // Fetch the task + task, err := cmdCtx.AdminFetcherExt().FetchTaskVersion(ctx, taskName, executionConfig.Version, project, domain) + if err != nil { + return nil, err + } + // Create task variables literal map + taskInputs := cmdGet.TaskInputs(task) + variableLiterals, err := MakeLiteralForVariables(executionConfig.Inputs, taskInputs) + if err != nil { + return nil, err + } + + var inputs = &core.LiteralMap{ + Literals: variableLiterals, + } + + envs := makeEnvs(executionConfig) + + // Set both deprecated field and new field for security identity passing + var securityContext *core.SecurityContext + var authRole *admin.AuthRole + + if len(executionConfig.KubeServiceAcct) > 0 || len(executionConfig.IamRoleARN) > 0 { + authRole = &admin.AuthRole{ + KubernetesServiceAccount: executionConfig.KubeServiceAcct, + AssumableIamRole: 
executionConfig.IamRoleARN, + } + securityContext = &core.SecurityContext{ + RunAs: &core.Identity{ + K8SServiceAccount: executionConfig.KubeServiceAcct, + IamRole: executionConfig.IamRoleARN, + }, + } + } + + id := &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: task.Id.Name, + Version: task.Id.Version, + } + + return createExecutionRequest(id, inputs, envs, securityContext, authRole, targetExecName), nil +} + +func relaunchExecution(ctx context.Context, executionName string, project string, domain string, + cmdCtx cmdCore.CommandContext, executionConfig *ExecutionConfig, targetExecutionName string) error { + if executionConfig.DryRun { + logger.Debugf(ctx, "skipping RelaunchExecution request (DryRun)") + return nil + } + relaunchedExec, err := cmdCtx.AdminClient().RelaunchExecution(ctx, &admin.ExecutionRelaunchRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Name: executionName, + Project: project, + Domain: domain, + }, + Name: targetExecutionName, + OverwriteCache: executionConfig.OverwriteCache, + }) + if err != nil { + return err + } + fmt.Printf("execution identifier %v\n", relaunchedExec.Id) + return nil +} + +func recoverExecution(ctx context.Context, executionName string, project string, domain string, + cmdCtx cmdCore.CommandContext, executionConfig *ExecutionConfig, targetExecName string) error { + if executionConfig.DryRun { + logger.Debugf(ctx, "skipping RecoverExecution request (DryRun)") + return nil + } + recoveredExec, err := cmdCtx.AdminClient().RecoverExecution(ctx, &admin.ExecutionRecoverRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Name: executionName, + Project: project, + Domain: domain, + }, + Name: targetExecName, + }) + if err != nil { + return err + } + fmt.Printf("execution identifier %v\n", recoveredExec.Id) + return nil +} + +func createExecutionRequest(ID *core.Identifier, inputs *core.LiteralMap, envs *admin.Envs, securityContext *core.SecurityContext, authRole 
*admin.AuthRole, targetExecName string) *admin.ExecutionCreateRequest { + + if len(targetExecName) == 0 { + targetExecName = "f" + strings.ReplaceAll(uuid.New().String(), "-", "")[:19] + } + var clusterAssignment *admin.ClusterAssignment + if executionConfig.ClusterPool != "" { + clusterAssignment = &admin.ClusterAssignment{ClusterPoolName: executionConfig.ClusterPool} + } + return &admin.ExecutionCreateRequest{ + Project: executionConfig.TargetProject, + Domain: executionConfig.TargetDomain, + Name: targetExecName, + Spec: &admin.ExecutionSpec{ + LaunchPlan: ID, + Metadata: &admin.ExecutionMetadata{ + Mode: admin.ExecutionMetadata_MANUAL, + Principal: "sdk", + Nesting: 0, + }, + AuthRole: authRole, + SecurityContext: securityContext, + ClusterAssignment: clusterAssignment, + OverwriteCache: executionConfig.OverwriteCache, + Envs: envs, + }, + Inputs: inputs, + } +} + +func readExecConfigFromFile(fileName string) (*ExecutionConfig, error) { + data, _err := ioutil.ReadFile(fileName) + if _err != nil { + return nil, fmt.Errorf("unable to read from %v yaml file", fileName) + } + executionConfigRead := ExecutionConfig{} + if _err = yaml.Unmarshal(data, &executionConfigRead); _err != nil { + return nil, _err + } + return &executionConfigRead, nil +} + +func resolveOverrides(toBeOverridden *ExecutionConfig, project string, domain string) { + toBeOverridden.DryRun = executionConfig.DryRun + if executionConfig.KubeServiceAcct != "" { + toBeOverridden.KubeServiceAcct = executionConfig.KubeServiceAcct + } + if executionConfig.IamRoleARN != "" { + toBeOverridden.IamRoleARN = executionConfig.IamRoleARN + } + if executionConfig.TargetProject != "" { + toBeOverridden.TargetProject = executionConfig.TargetProject + } + if executionConfig.TargetDomain != "" { + toBeOverridden.TargetDomain = executionConfig.TargetDomain + } + if executionConfig.Version != "" { + toBeOverridden.Version = executionConfig.Version + } + if executionConfig.ClusterPool != "" { + 
toBeOverridden.ClusterPool = executionConfig.ClusterPool + } + // Use the root project and domain to launch the task/workflow if target is unspecified + if executionConfig.TargetProject == "" { + toBeOverridden.TargetProject = project + } + if executionConfig.TargetDomain == "" { + toBeOverridden.TargetDomain = domain + } +} + +func readConfigAndValidate(project string, domain string) (ExecutionParams, error) { + executionParams := ExecutionParams{} + if executionConfig.ExecFile == "" && executionConfig.Relaunch == "" && executionConfig.Recover == "" { + return executionParams, fmt.Errorf("executionConfig, relaunch and recover can't be empty." + + " Run the flytectl get task/launchplan to generate the config") + } + if executionConfig.Relaunch != "" { + resolveOverrides(executionConfig, project, domain) + return ExecutionParams{name: executionConfig.Relaunch, execType: Relaunch}, nil + } + if len(executionConfig.Recover) > 0 { + resolveOverrides(executionConfig, project, domain) + return ExecutionParams{name: executionConfig.Recover, execType: Recover}, nil + } + var readExecutionConfig *ExecutionConfig + var err error + if readExecutionConfig, err = readExecConfigFromFile(executionConfig.ExecFile); err != nil { + return executionParams, err + } + resolveOverrides(readExecutionConfig, project, domain) + // Update executionConfig pointer to readExecutionConfig as it contains all the updates. 
+ executionConfig = readExecutionConfig + isTask := readExecutionConfig.Task != "" + isWorkflow := readExecutionConfig.Workflow != "" + if isTask == isWorkflow { + return executionParams, fmt.Errorf("either task or workflow name should be specified" + + " to launch an execution") + } + name := readExecutionConfig.Task + execType := Task + if !isTask { + name = readExecutionConfig.Workflow + execType = Workflow + } + return ExecutionParams{name: name, execType: execType}, nil +} + +func makeEnvs(executionConfig *ExecutionConfig) *admin.Envs { + var values []*core.KeyValuePair + for key, value := range executionConfig.Envs { + values = append(values, &core.KeyValuePair{Key: key, Value: value}) + } + return &admin.Envs{Values: values} +} diff --git a/flytectl/cmd/create/execution_util_test.go b/flytectl/cmd/create/execution_util_test.go new file mode 100644 index 0000000000..a865e77a64 --- /dev/null +++ b/flytectl/cmd/create/execution_util_test.go @@ -0,0 +1,280 @@ +package create + +import ( + "errors" + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flytectl/cmd/config" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var ( + executionCreateResponse *admin.ExecutionCreateResponse + relaunchRequest *admin.ExecutionRelaunchRequest + recoverRequest *admin.ExecutionRecoverRequest +) + +// This function needs to be called after testutils.Steup() +func createExecutionUtilSetup() { + executionCreateResponse = &admin.ExecutionCreateResponse{ + Id: &core.WorkflowExecutionIdentifier{ + Project: "flytesnacks", + Domain: "development", + Name: "f652ea3596e7f4d80a0e", + }, + } + relaunchRequest = &admin.ExecutionRelaunchRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Name: "execName", + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }, + } + recoverRequest = &admin.ExecutionRecoverRequest{ 
+ Id: &core.WorkflowExecutionIdentifier{ + Name: "execName", + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }, + } + executionConfig = &ExecutionConfig{} +} + +func TestCreateExecutionForRelaunch(t *testing.T) { + s := setup() + createExecutionUtilSetup() + s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(executionCreateResponse, nil) + err := relaunchExecution(s.Ctx, "execName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) +} + +func TestCreateExecutionForRelaunchNotFound(t *testing.T) { + s := setup() + createExecutionUtilSetup() + s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(nil, errors.New("unknown execution")) + err := relaunchExecution(s.Ctx, "execName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + + assert.NotNil(t, err) + assert.Equal(t, err, errors.New("unknown execution")) +} + +func TestCreateExecutionForRecovery(t *testing.T) { + s := setup() + createExecutionUtilSetup() + s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(executionCreateResponse, nil) + err := recoverExecution(s.Ctx, "execName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) +} + +func TestCreateExecutionForRecoveryNotFound(t *testing.T) { + s := setup() + createExecutionUtilSetup() + s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(nil, errors.New("unknown execution")) + err := recoverExecution(s.Ctx, "execName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.NotNil(t, err) + assert.Equal(t, err, errors.New("unknown execution")) +} + +func TestCreateExecutionRequestForWorkflow(t *testing.T) { + t.Run("successful", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + launchPlan := &admin.LaunchPlan{} + 
s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan, nil) + execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + }) + t.Run("successful with envs", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + launchPlan := &admin.LaunchPlan{} + s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan, nil) + var executionConfigWithEnvs = &ExecutionConfig{ + Envs: map[string]string{"foo": "bar"}, + } + execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfigWithEnvs, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + }) + t.Run("successful with empty envs", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + launchPlan := &admin.LaunchPlan{} + s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan, nil) + var executionConfigWithEnvs = &ExecutionConfig{ + Envs: map[string]string{}, + } + execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfigWithEnvs, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + }) + t.Run("failed literal conversion", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + launchPlan := &admin.LaunchPlan{ + Spec: &admin.LaunchPlanSpec{ + DefaultInputs: &core.ParameterMap{ + Parameters: map[string]*core.Parameter{"nilparam": nil}, + }, + }, + } + s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan, nil) + execCreateRequest, err := 
createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.NotNil(t, err) + assert.Nil(t, execCreateRequest) + assert.Equal(t, fmt.Errorf("parameter [nilparam] has nil Variable"), err) + }) + t.Run("failed fetch", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.NotNil(t, err) + assert.Nil(t, execCreateRequest) + assert.Equal(t, err, errors.New("failed")) + }) + t.Run("with security context", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + executionConfig.KubeServiceAcct = "default" + launchPlan := &admin.LaunchPlan{} + s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan, nil) + s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, mock.Anything).Return(launchPlan, nil) + execCreateRequest, err := createExecutionRequestForWorkflow(s.Ctx, "wfName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + executionConfig.KubeServiceAcct = "" + }) +} + +func TestCreateExecutionRequestForTask(t *testing.T) { + t.Run("successful", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + task := &admin.Task{ + Id: &core.Identifier{ + Name: "taskName", + }, + } + s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(task, nil) + execCreateRequest, err := createExecutionRequestForTask(s.Ctx, "taskName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) + assert.NotNil(t, 
execCreateRequest) + }) + t.Run("successful with envs", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + task := &admin.Task{ + Id: &core.Identifier{ + Name: "taskName", + }, + } + s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(task, nil) + var executionConfigWithEnvs = &ExecutionConfig{ + Envs: map[string]string{"foo": "bar"}, + } + execCreateRequest, err := createExecutionRequestForTask(s.Ctx, "taskName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfigWithEnvs, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + }) + t.Run("successful with empty envs", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + task := &admin.Task{ + Id: &core.Identifier{ + Name: "taskName", + }, + } + s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(task, nil) + var executionConfigWithEnvs = &ExecutionConfig{ + Envs: map[string]string{}, + } + execCreateRequest, err := createExecutionRequestForTask(s.Ctx, "taskName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfigWithEnvs, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + }) + t.Run("failed literal conversion", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + task := &admin.Task{ + Closure: &admin.TaskClosure{ + CompiledTask: &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "nilvar": nil, + }, + }, + }, + }, + }, + }, + } + s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(task, nil) + execCreateRequest, err := createExecutionRequestForTask(s.Ctx, "taskName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.NotNil(t, err) + assert.Nil(t, execCreateRequest) + 
assert.Equal(t, fmt.Errorf("variable [nilvar] has nil type"), err) + }) + t.Run("failed fetch", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + execCreateRequest, err := createExecutionRequestForTask(s.Ctx, "taskName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.NotNil(t, err) + assert.Nil(t, execCreateRequest) + assert.Equal(t, err, errors.New("failed")) + }) + t.Run("with security context", func(t *testing.T) { + s := setup() + createExecutionUtilSetup() + executionConfig.KubeServiceAcct = "default" + task := &admin.Task{ + Id: &core.Identifier{ + Name: "taskName", + }, + } + s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(task, nil) + execCreateRequest, err := createExecutionRequestForTask(s.Ctx, "taskName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) + assert.NotNil(t, execCreateRequest) + executionConfig.KubeServiceAcct = "" + }) +} + +func Test_resolveOverrides(t *testing.T) { + executionConfig.KubeServiceAcct = "k8s-acct" + executionConfig.IamRoleARN = "iam-role" + executionConfig.TargetProject = "t-proj" + executionConfig.TargetDomain = "t-domain" + executionConfig.Version = "v1" + executionConfig.ClusterPool = "gpu" + cfg := &ExecutionConfig{} + + resolveOverrides(cfg, "p1", "d1") + + assert.Equal(t, "k8s-acct", cfg.KubeServiceAcct) + assert.Equal(t, "iam-role", cfg.IamRoleARN) + assert.Equal(t, "t-proj", cfg.TargetProject) + assert.Equal(t, "t-domain", cfg.TargetDomain) + assert.Equal(t, "v1", cfg.Version) + assert.Equal(t, "gpu", cfg.ClusterPool) +} + +func TestCreateExecutionForRelaunchOverwritingCache(t *testing.T) { + s := setup() + createExecutionUtilSetup() + executionConfig.OverwriteCache = true + 
relaunchRequest.OverwriteCache = true // ensure request has overwriteCache param set + s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(executionCreateResponse, nil) + err := relaunchExecution(s.Ctx, "execName", config.GetConfig().Project, config.GetConfig().Domain, s.CmdCtx, executionConfig, "") + assert.Nil(t, err) +} diff --git a/flytectl/cmd/create/executionconfig_flags.go b/flytectl/cmd/create/executionconfig_flags.go new file mode 100755 index 0000000000..9908df93b1 --- /dev/null +++ b/flytectl/cmd/create/executionconfig_flags.go @@ -0,0 +1,67 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package create + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (ExecutionConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (ExecutionConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (ExecutionConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in ExecutionConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg ExecutionConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("ExecutionConfig", pflag.ExitOnError) + cmdFlags.StringVar(&executionConfig.ExecFile, fmt.Sprintf("%v%v", prefix, "execFile"), executionConfig.ExecFile, "file for the execution params. If not specified defaults to <_name>.execution_spec.yaml") + cmdFlags.StringVar(&executionConfig.TargetDomain, fmt.Sprintf("%v%v", prefix, "targetDomain"), executionConfig.TargetDomain, "project where execution needs to be created. If not specified configured domain would be used.") + cmdFlags.StringVar(&executionConfig.TargetProject, fmt.Sprintf("%v%v", prefix, "targetProject"), executionConfig.TargetProject, "project where execution needs to be created. If not specified configured project would be used.") + cmdFlags.StringVar(&executionConfig.KubeServiceAcct, fmt.Sprintf("%v%v", prefix, "kubeServiceAcct"), executionConfig.KubeServiceAcct, "kubernetes service account AuthRole for launching execution.") + cmdFlags.StringVar(&executionConfig.IamRoleARN, fmt.Sprintf("%v%v", prefix, "iamRoleARN"), executionConfig.IamRoleARN, "iam role ARN AuthRole for launching execution.") + cmdFlags.StringVar(&executionConfig.Relaunch, fmt.Sprintf("%v%v", prefix, "relaunch"), executionConfig.Relaunch, "execution id to be relaunched.") + cmdFlags.StringVar(&executionConfig.Recover, fmt.Sprintf("%v%v", prefix, "recover"), executionConfig.Recover, "execution id to be recreated from the last known failure point.") + cmdFlags.BoolVar(&executionConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), executionConfig.DryRun, "execute command without making any modifications.") + cmdFlags.StringVar(&executionConfig.Version, fmt.Sprintf("%v%v", prefix, "version"), executionConfig.Version, "specify version of execution workflow/task.") + cmdFlags.StringVar(&executionConfig.ClusterPool, fmt.Sprintf("%v%v", prefix, "clusterPool"), executionConfig.ClusterPool, "specify which cluster pool to assign execution to.") + 
cmdFlags.BoolVar(&executionConfig.OverwriteCache, fmt.Sprintf("%v%v", prefix, "overwriteCache"), executionConfig.OverwriteCache, "skip cached results when performing execution, causing all outputs to be re-calculated and stored data to be overwritten. Does not work for recovered executions.") + cmdFlags.StringVar(&executionConfig.Workflow, fmt.Sprintf("%v%v", prefix, "workflow"), executionConfig.Workflow, "") + cmdFlags.StringVar(&executionConfig.Task, fmt.Sprintf("%v%v", prefix, "task"), executionConfig.Task, "") + return cmdFlags +} diff --git a/flytectl/cmd/create/executionconfig_flags_test.go b/flytectl/cmd/create/executionconfig_flags_test.go new file mode 100755 index 0000000000..7891b4f9ba --- /dev/null +++ b/flytectl/cmd/create/executionconfig_flags_test.go @@ -0,0 +1,284 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package create + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsExecutionConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementExecutionConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsExecutionConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookExecutionConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementExecutionConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_ExecutionConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookExecutionConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_ExecutionConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_ExecutionConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_ExecutionConfig(val, result)) +} + +func testDecodeRaw_ExecutionConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_ExecutionConfig(vStringSlice, result)) +} + +func TestExecutionConfig_GetPFlagSet(t *testing.T) { + val := ExecutionConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + 
+func TestExecutionConfig_SetFlags(t *testing.T) { + actual := ExecutionConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_execFile", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("execFile", testValue) + if vString, err := cmdFlags.GetString("execFile"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.ExecFile) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_targetDomain", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("targetDomain", testValue) + if vString, err := cmdFlags.GetString("targetDomain"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.TargetDomain) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_targetProject", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("targetProject", testValue) + if vString, err := cmdFlags.GetString("targetProject"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.TargetProject) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_kubeServiceAcct", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("kubeServiceAcct", testValue) + if vString, err := cmdFlags.GetString("kubeServiceAcct"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.KubeServiceAcct) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_iamRoleARN", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("iamRoleARN", testValue) + if vString, err := cmdFlags.GetString("iamRoleARN"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.IamRoleARN) + + } else { + assert.FailNow(t, err.Error()) + } 
+ }) + }) + t.Run("Test_relaunch", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("relaunch", testValue) + if vString, err := cmdFlags.GetString("relaunch"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.Relaunch) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_recover", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("recover", testValue) + if vString, err := cmdFlags.GetString("recover"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.Recover) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_version", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("version", testValue) + if vString, err := cmdFlags.GetString("version"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.Version) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_clusterPool", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("clusterPool", testValue) + if vString, err := cmdFlags.GetString("clusterPool"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.ClusterPool) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_overwriteCache", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("overwriteCache", testValue) + if vBool, err := cmdFlags.GetBool("overwriteCache"); err 
== nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vBool), &actual.OverwriteCache) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_workflow", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("workflow", testValue) + if vString, err := cmdFlags.GetString("workflow"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.Workflow) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_task", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("task", testValue) + if vString, err := cmdFlags.GetString("task"); err == nil { + testDecodeJson_ExecutionConfig(t, fmt.Sprintf("%v", vString), &actual.Task) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/create/project.go b/flytectl/cmd/create/project.go new file mode 100644 index 0000000000..bf4e70c09e --- /dev/null +++ b/flytectl/cmd/create/project.go @@ -0,0 +1,77 @@ +package create + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + + "github.com/flyteorg/flyte/flytestdlib/logger" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + projectShort = "Creates project resources." + projectLong = ` +Create a project given its name and id. + +:: + + flytectl create project --name flytesnacks --id flytesnacks --description "flytesnacks description" --labels app=flyte + +.. note:: + The terms project/projects are interchangeable in these commands. + +Create a project by definition file. + +:: + + flytectl create project --file project.yaml + +.. code-block:: yaml + + id: "project-unique-id" + name: "Name" + labels: + values: + app: flyte + description: "Some description for the project." + +.. 
note:: + The project name shouldn't contain any whitespace characters. +` +) + +func createProjectsCommand(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + projectSpec, err := project.DefaultProjectConfig.GetProjectSpec(config.GetConfig()) + if err != nil { + return err + } + if projectSpec.Id == "" { + return fmt.Errorf(clierrors.ErrProjectNotPassed) + } + if projectSpec.Name == "" { + return fmt.Errorf(clierrors.ErrProjectNameNotPassed) + } + + if project.DefaultProjectConfig.DryRun { + logger.Debugf(ctx, "skipping RegisterProject request (DryRun)") + } else { + _, err := cmdCtx.AdminClient().RegisterProject(ctx, &admin.ProjectRegisterRequest{ + Project: &admin.Project{ + Id: projectSpec.Id, + Name: projectSpec.Name, + Description: projectSpec.Description, + Labels: projectSpec.Labels, + }, + }) + if err != nil { + return err + } + } + fmt.Println("project created successfully.") + return nil +} diff --git a/flytectl/cmd/create/project_test.go b/flytectl/cmd/create/project_test.go new file mode 100644 index 0000000000..a1f970e26b --- /dev/null +++ b/flytectl/cmd/create/project_test.go @@ -0,0 +1,78 @@ +package create + +import ( + "errors" + "fmt" + "testing" + + "github.com/flyteorg/flytectl/clierrors" + + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const projectValue = "dummyProject" + +var ( + projectRegisterRequest *admin.ProjectRegisterRequest +) + +func createProjectSetup() { + projectRegisterRequest = &admin.ProjectRegisterRequest{ + Project: &admin.Project{ + Id: projectValue, + Name: projectValue, + Description: "", + Labels: &admin.Labels{ + Values: map[string]string{}, + }, + }, + } + project.DefaultProjectConfig.ID = "" + project.DefaultProjectConfig.Name = "" + project.DefaultProjectConfig.Labels = 
map[string]string{} + project.DefaultProjectConfig.Description = "" + config.GetConfig().Project = "" +} +func TestCreateProjectFunc(t *testing.T) { + s := setup() + createProjectSetup() + defer s.TearDownAndVerify(t, "project created successfully.") + project.DefaultProjectConfig.ID = projectValue + project.DefaultProjectConfig.Name = projectValue + project.DefaultProjectConfig.Labels = map[string]string{} + project.DefaultProjectConfig.Description = "" + s.MockAdminClient.OnRegisterProjectMatch(s.Ctx, projectRegisterRequest).Return(nil, nil) + err := createProjectsCommand(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.MockAdminClient.AssertCalled(t, "RegisterProject", s.Ctx, projectRegisterRequest) +} + +func TestEmptyProjectID(t *testing.T) { + s := setup() + createProjectSetup() + defer s.TearDownAndVerify(t, "") + project.DefaultProjectConfig = &project.ConfigProject{} + s.MockAdminClient.OnRegisterProjectMatch(s.Ctx, projectRegisterRequest).Return(nil, nil) + err := createProjectsCommand(s.Ctx, []string{}, s.CmdCtx) + assert.Equal(t, errors.New(clierrors.ErrProjectNotPassed), err) + s.MockAdminClient.AssertNotCalled(t, "RegisterProject", s.Ctx, mock.Anything) +} + +func TestEmptyProjectName(t *testing.T) { + s := setup() + createProjectSetup() + defer s.TearDownAndVerify(t, "") + project.DefaultProjectConfig.ID = projectValue + project.DefaultProjectConfig.Labels = map[string]string{} + project.DefaultProjectConfig.Description = "" + s.MockAdminClient.OnRegisterProjectMatch(s.Ctx, projectRegisterRequest).Return(nil, nil) + err := createProjectsCommand(s.Ctx, []string{}, s.CmdCtx) + assert.Equal(t, fmt.Errorf("project name is a required flag"), err) + s.MockAdminClient.AssertNotCalled(t, "RegisterProject", s.Ctx, mock.Anything) +} diff --git a/flytectl/cmd/create/serialization_utils.go b/flytectl/cmd/create/serialization_utils.go new file mode 100644 index 0000000000..dfd772c604 --- /dev/null +++ b/flytectl/cmd/create/serialization_utils.go @@ -0,0 
+1,62 @@ +package create + +import ( + "fmt" + + "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +// TODO: Move all functions to flyteidl +// MakeLiteralForVariables builds a map of literals for the provided serialized values. If a provided value does not have +// a corresponding variable or if that variable is invalid (e.g. doesn't have Type property populated), it returns an +// error. +func MakeLiteralForVariables(serialize map[string]interface{}, variables map[string]*core.Variable) (map[string]*core.Literal, error) { + types := make(map[string]*core.LiteralType) + for k, v := range variables { + t := v.GetType() + if t == nil { + return nil, fmt.Errorf("variable [%v] has nil type", k) + } + + types[k] = t + } + + return MakeLiteralForTypes(serialize, types) +} + +// MakeLiteralForParams builds a map of literals for the provided serialized values. If a provided value does not have +// a corresponding parameter or if that parameter is invalid (e.g. doesn't have Type property populated), it returns an +// error. +func MakeLiteralForParams(serialize map[string]interface{}, parameters map[string]*core.Parameter) (map[string]*core.Literal, error) { + types := make(map[string]*core.LiteralType) + for k, v := range parameters { + if variable := v.GetVar(); variable == nil { + return nil, fmt.Errorf("parameter [%v] has nil Variable", k) + } else if t := variable.GetType(); t == nil { + return nil, fmt.Errorf("parameter [%v] has nil variable type", k) + } else { + types[k] = t + } + } + + return MakeLiteralForTypes(serialize, types) +} + +// MakeLiteralForTypes builds a map of literals for the provided serialized values. If a provided value does not have +// a corresponding type or if it fails to create a literal for the given type and value, it returns an error. 
+func MakeLiteralForTypes(serialize map[string]interface{}, types map[string]*core.LiteralType) (map[string]*core.Literal, error) { + result := make(map[string]*core.Literal) + var err error + for k, v := range serialize { + if t, typeFound := types[k]; typeFound { + if result[k], err = coreutils.MakeLiteralForType(t, v); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("no matching type for [%v]", k) + } + } + + return result, nil +} diff --git a/flytectl/cmd/create/serialization_utils_test.go b/flytectl/cmd/create/serialization_utils_test.go new file mode 100644 index 0000000000..7b10cf1b7c --- /dev/null +++ b/flytectl/cmd/create/serialization_utils_test.go @@ -0,0 +1,148 @@ +package create + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/stretchr/testify/assert" +) + +func TestMakeLiteralForTypes(t *testing.T) { + inputTypes := map[string]*core.LiteralType{ + "a": { + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + "x": { + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + "b": { + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRING, + }, + }, + "y": { + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRING, + }, + }, + } + + t.Run("Happy path", func(t *testing.T) { + inputValues := map[string]interface{}{ + "a": 5, + "b": "hello", + } + + m, err := MakeLiteralForTypes(inputValues, inputTypes) + assert.NoError(t, err) + assert.Len(t, m, len(inputValues)) + }) + + t.Run("Type not found", func(t *testing.T) { + inputValues := map[string]interface{}{ + "notfound": 5, + } + + _, err := MakeLiteralForTypes(inputValues, inputTypes) + assert.Error(t, err) + }) + + t.Run("Invalid value", func(t *testing.T) { + inputValues := map[string]interface{}{ + "a": "hello", + } + + _, err := MakeLiteralForTypes(inputValues, inputTypes) + assert.Error(t, err) + }) +} + +func TestMakeLiteralForParams(t *testing.T) { + 
inputValues := map[string]interface{}{ + "a": "hello", + } + + t.Run("Happy path", func(t *testing.T) { + inputParams := map[string]*core.Parameter{ + "a": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRING, + }, + }, + }, + }, + } + + m, err := MakeLiteralForParams(inputValues, inputParams) + assert.NoError(t, err) + assert.Len(t, m, len(inputValues)) + }) + + t.Run("Invalid Param", func(t *testing.T) { + inputParams := map[string]*core.Parameter{ + "a": nil, + } + + _, err := MakeLiteralForParams(inputValues, inputParams) + assert.Error(t, err) + }) + + t.Run("Invalid Type", func(t *testing.T) { + inputParams := map[string]*core.Parameter{ + "a": { + Var: &core.Variable{}, + }, + } + + _, err := MakeLiteralForParams(inputValues, inputParams) + assert.Error(t, err) + }) +} + +func TestMakeLiteralForVariables(t *testing.T) { + inputValues := map[string]interface{}{ + "a": "hello", + } + + t.Run("Happy path", func(t *testing.T) { + inputVariables := map[string]*core.Variable{ + "a": { + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRING, + }, + }, + }, + } + + m, err := MakeLiteralForVariables(inputValues, inputVariables) + assert.NoError(t, err) + assert.Len(t, m, len(inputValues)) + }) + + t.Run("Invalid Variable", func(t *testing.T) { + inputVariables := map[string]*core.Variable{ + "a": nil, + } + + _, err := MakeLiteralForVariables(inputValues, inputVariables) + assert.Error(t, err) + }) + + t.Run("Invalid Type", func(t *testing.T) { + inputVariables := map[string]*core.Variable{ + "a": { + Type: nil, + }, + } + + _, err := MakeLiteralForVariables(inputValues, inputVariables) + assert.Error(t, err) + }) +} diff --git a/flytectl/cmd/delete/delete.go b/flytectl/cmd/delete/delete.go new file mode 100644 index 0000000000..b25c91babd --- /dev/null +++ b/flytectl/cmd/delete/delete.go @@ -0,0 +1,58 @@ +package delete + +import ( + 
"github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using Sphinx. +const ( + deleteCmdShort = `Terminates/deletes various Flyte resources such as executions and resource attributes.` + deleteCmdLong = ` +Delete a resource; if an execution: +:: + + flytectl delete execution kxd1i72850 -d development -p flytesnacks +` +) + +// RemoteDeleteCommand will return delete command +func RemoteDeleteCommand() *cobra.Command { + deleteCmd := &cobra.Command{ + Use: "delete", + Short: deleteCmdShort, + Long: deleteCmdLong, + } + terminateResourcesFuncs := map[string]cmdcore.CommandEntry{ + "execution": {CmdFunc: terminateExecutionFunc, Aliases: []string{"executions"}, Short: execCmdShort, + Long: execCmdLong, PFlagProvider: execution.DefaultExecDeleteConfig}, + "task-resource-attribute": {CmdFunc: deleteTaskResourceAttributes, Aliases: []string{"task-resource-attributes"}, + Short: taskResourceAttributesShort, + Long: taskResourceAttributesLong, PFlagProvider: taskresourceattribute.DefaultDelConfig, ProjectDomainNotRequired: true}, + "cluster-resource-attribute": {CmdFunc: deleteClusterResourceAttributes, Aliases: []string{"cluster-resource-attributes"}, + Short: clusterResourceAttributesShort, + Long: clusterResourceAttributesLong, PFlagProvider: clusterresourceattribute.DefaultDelConfig, ProjectDomainNotRequired: true}, + "execution-cluster-label": 
{CmdFunc: deleteExecutionClusterLabel, Aliases: []string{"execution-cluster-labels"}, + Short: executionClusterLabelShort, + Long: executionClusterLabelLong, PFlagProvider: executionclusterlabel.DefaultDelConfig, ProjectDomainNotRequired: true}, + "execution-queue-attribute": {CmdFunc: deleteExecutionQueueAttributes, Aliases: []string{"execution-queue-attributes"}, + Short: executionQueueAttributesShort, + Long: executionQueueAttributesLong, PFlagProvider: executionqueueattribute.DefaultDelConfig, ProjectDomainNotRequired: true}, + "plugin-override": {CmdFunc: deletePluginOverride, Aliases: []string{"plugin-overrides"}, + Short: pluginOverrideShort, + Long: pluginOverrideLong, PFlagProvider: pluginoverride.DefaultDelConfig, ProjectDomainNotRequired: true}, + "workflow-execution-config": {CmdFunc: deleteWorkflowExecutionConfig, Aliases: []string{"workflow-execution-config"}, + Short: workflowExecutionConfigShort, + Long: workflowExecutionConfigLong, PFlagProvider: workflowexecutionconfig.DefaultDelConfig, ProjectDomainNotRequired: true}, + } + cmdcore.AddCommands(deleteCmd, terminateResourcesFuncs) + return deleteCmd +} diff --git a/flytectl/cmd/delete/delete_test.go b/flytectl/cmd/delete/delete_test.go new file mode 100644 index 0000000000..4692e0e6dd --- /dev/null +++ b/flytectl/cmd/delete/delete_test.go @@ -0,0 +1,40 @@ +package delete + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flytectl/cmd/testutils" +) + +const ( + testDataNonExistentFile = "testdata/non-existent-file" + testDataInvalidAttrFile = "testdata/invalid_attribute.yaml" +) + +var setup = testutils.Setup + +func TestDeleteCommand(t *testing.T) { + deleteCommand := RemoteDeleteCommand() + assert.Equal(t, deleteCommand.Use, "delete") + assert.Equal(t, deleteCommand.Short, deleteCmdShort) + assert.Equal(t, deleteCommand.Long, deleteCmdLong) + assert.Equal(t, len(deleteCommand.Commands()), 7) + cmdNouns := deleteCommand.Commands() + // Sort by Use 
value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + useArray := []string{"cluster-resource-attribute", "execution", "execution-cluster-label", "execution-queue-attribute", "plugin-override", "task-resource-attribute", "workflow-execution-config"} + aliases := [][]string{{"cluster-resource-attributes"}, {"executions"}, {"execution-cluster-labels"}, {"execution-queue-attributes"}, {"plugin-overrides"}, {"task-resource-attributes"}, {"workflow-execution-config"}} + shortArray := []string{clusterResourceAttributesShort, execCmdShort, executionClusterLabelShort, executionQueueAttributesShort, pluginOverrideShort, taskResourceAttributesShort, workflowExecutionConfigShort} + longArray := []string{clusterResourceAttributesLong, execCmdLong, executionClusterLabelLong, executionQueueAttributesLong, pluginOverrideLong, taskResourceAttributesLong, workflowExecutionConfigLong} + for i := range cmdNouns { + assert.Equal(t, cmdNouns[i].Use, useArray[i]) + assert.Equal(t, cmdNouns[i].Aliases, aliases[i]) + assert.Equal(t, cmdNouns[i].Short, shortArray[i]) + assert.Equal(t, cmdNouns[i].Long, longArray[i]) + } +} diff --git a/flytectl/cmd/delete/execution.go b/flytectl/cmd/delete/execution.go new file mode 100644 index 0000000000..5d2e0ff421 --- /dev/null +++ b/flytectl/cmd/delete/execution.go @@ -0,0 +1,85 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +// Long descriptions are whitespace sensitive when generating docs using Sphinx. +const ( + execCmdShort = `Terminates/deletes execution resources.` + execCmdLong = ` +Task executions can be aborted only if they are in non-terminal state. 
If they are FAILED, ABORTED, or SUCCEEDED, calling terminate on them has no effect. +Terminate a single execution with its name: + +:: + + flytectl delete execution c6a51x2l9e -d development -p flytesnacks + +.. note:: + The terms execution/executions are interchangeable in these commands. + +Get an execution to check its state: + +:: + + flytectl get execution -d development -p flytesnacks + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | NAME (7) | WORKFLOW NAME | TYPE | PHASE | STARTED | ELAPSED TIME | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | c6a51x2l9e | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:13:04.680476300Z | 15.540361300s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + +Terminate multiple executions with their names: +:: + + flytectl delete execution eeam9s8sny p4wv4hwgc4 -d development -p flytesnacks + +Get an execution to find the state of previously terminated executions: + +:: + + flytectl get execution -d development -p flytesnacks + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | NAME (7) | WORKFLOW NAME | TYPE | PHASE | STARTED | ELAPSED TIME | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | c6a51x2l9e | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:13:04.680476300Z | 15.540361300s | + ------------ ------------------------------------------------------------------------- ---------- ----------- 
-------------------------------- --------------- + | eeam9s8sny | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:14:04.803084100Z | 42.306385500s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | p4wv4hwgc4 | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:14:27.476307400Z | 19.727504400s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + +Usage +` +) + +func terminateExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + for i := 0; i < len(args); i++ { + name := args[i] + logger.Infof(ctx, "Terminating execution of %v execution ", name) + if execution.DefaultExecDeleteConfig.DryRun { + logger.Infof(ctx, "skipping TerminateExecution request (dryRun)") + } else { + _, err := cmdCtx.AdminClient().TerminateExecution(ctx, &admin.ExecutionTerminateRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Name: name, + }, + }) + if err != nil { + logger.Errorf(ctx, "Failed to terminate execution of %v execution due to %v ", name, err) + return err + } + } + logger.Infof(ctx, "Terminated execution of %v execution ", name) + } + return nil +} diff --git a/flytectl/cmd/delete/execution_test.go b/flytectl/cmd/delete/execution_test.go new file mode 100644 index 0000000000..63643a2c47 --- /dev/null +++ b/flytectl/cmd/delete/execution_test.go @@ -0,0 +1,72 @@ +package delete + +import ( + "errors" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/config" + + "github.com/stretchr/testify/assert" +) + +var ( + args []string + terminateExecRequests []*admin.ExecutionTerminateRequest 
+) + +func terminateExecutionSetup() { + args = append(args, "exec1", "exec2") + terminateExecRequests = []*admin.ExecutionTerminateRequest{ + {Id: &core.WorkflowExecutionIdentifier{ + Name: "exec1", + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }}, + {Id: &core.WorkflowExecutionIdentifier{ + Name: "exec2", + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }}, + } +} + +func TestTerminateExecutionFunc(t *testing.T) { + s := setup() + terminateExecutionSetup() + terminateExecResponse := &admin.ExecutionTerminateResponse{} + s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(terminateExecResponse, nil) + s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[1]).Return(terminateExecResponse, nil) + err := terminateExecutionFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.MockAdminClient.AssertCalled(t, "TerminateExecution", s.Ctx, terminateExecRequests[0]) + s.MockAdminClient.AssertCalled(t, "TerminateExecution", s.Ctx, terminateExecRequests[1]) + s.TearDownAndVerify(t, "") +} + +func TestTerminateExecutionFuncWithError(t *testing.T) { + s := setup() + terminateExecutionSetup() + terminateExecResponse := &admin.ExecutionTerminateResponse{} + s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(nil, errors.New("failed to terminate")) + s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[1]).Return(terminateExecResponse, nil) + err := terminateExecutionFunc(s.Ctx, args, s.CmdCtx) + assert.Equal(t, errors.New("failed to terminate"), err) + s.MockAdminClient.AssertCalled(t, "TerminateExecution", s.Ctx, terminateExecRequests[0]) + s.MockAdminClient.AssertNotCalled(t, "TerminateExecution", s.Ctx, terminateExecRequests[1]) + s.TearDownAndVerify(t, "") +} + +func TestTerminateExecutionFuncWithPartialSuccess(t *testing.T) { + s := setup() + terminateExecutionSetup() + terminateExecResponse := 
&admin.ExecutionTerminateResponse{} + s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(terminateExecResponse, nil) + s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[1]).Return(nil, errors.New("failed to terminate")) + err := terminateExecutionFunc(s.Ctx, args, s.CmdCtx) + assert.Equal(t, errors.New("failed to terminate"), err) + s.MockAdminClient.AssertCalled(t, "TerminateExecution", s.Ctx, terminateExecRequests[0]) + s.MockAdminClient.AssertCalled(t, "TerminateExecution", s.Ctx, terminateExecRequests[1]) + s.TearDownAndVerify(t, "") +} diff --git a/flytectl/cmd/delete/matchable_attribute_util.go b/flytectl/cmd/delete/matchable_attribute_util.go new file mode 100644 index 0000000000..63c018cbd3 --- /dev/null +++ b/flytectl/cmd/delete/matchable_attribute_util.go @@ -0,0 +1,46 @@ +package delete + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/pkg/ext" +) + +func deleteMatchableAttr(ctx context.Context, project, domain, workflowName string, + deleter ext.AdminDeleterExtInterface, rsType admin.MatchableResource, dryRun bool) error { + if len(workflowName) > 0 { + // Delete the workflow attribute from the admin. If the attribute doesn't exist, admin doesn't return an error and same behavior is followed here + if dryRun { + fmt.Print("skipping DeleteWorkflowAttributes request (dryRun)\n") + } else { + err := deleter.DeleteWorkflowAttributes(ctx, project, domain, workflowName, rsType) + if err != nil { + return err + } + } + fmt.Printf("Deleted matchable resources from %v project and domain %v and workflow %v\n", project, domain, workflowName) + } else { + // Delete the project domain attribute from the admin.
If the attribute doesn't exist, admin doesn't return an error and same behavior is followed here + if dryRun { + fmt.Print("skipping DeleteProjectDomainAttributes request (dryRun)\n") + } else { + if len(domain) == 0 { + err := deleter.DeleteProjectAttributes(ctx, project, rsType) + if err != nil { + return err + } + fmt.Printf("Deleted matchable resources from %v project \n", project) + } else { + err := deleter.DeleteProjectDomainAttributes(ctx, project, domain, rsType) + if err != nil { + return err + } + fmt.Printf("Deleted matchable resources from %v project and domain %v\n", project, domain) + } + } + + } + return nil +} diff --git a/flytectl/cmd/delete/matchable_cluster_resource_attribute.go b/flytectl/cmd/delete/matchable_cluster_resource_attribute.go new file mode 100644 index 0000000000..5b7a04f8df --- /dev/null +++ b/flytectl/cmd/delete/matchable_cluster_resource_attribute.go @@ -0,0 +1,78 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + clusterResourceAttributesShort = "Deletes matchable resources of cluster attributes." + clusterResourceAttributesLong = ` +Delete cluster resource attributes for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete cluster-resource-attribute -p flytesnacks -d development + + +To delete cluster resource attribute using the config file that was used to create it, run: + +:: + + flytectl delete cluster-resource-attribute --attrFile cra.yaml + +For example, here's the config file cra.yaml: + +..
code-block:: yaml + + domain: development + project: flytesnacks + attributes: + foo: "bar" + buzz: "lightyear" + +Attributes are optional in the file, which are unread during the 'delete' command but can be retained since the same file can be used for 'get', 'update' and 'delete' commands. + +To delete cluster resource attribute for the workflow 'core.control_flow.merge_sort.merge_sort', run: + +:: + + flytectl delete cluster-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage +` +) + +func deleteClusterResourceAttributes(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var pwdGetter sconfig.ProjectDomainWorkflowGetter + pwdGetter = sconfig.PDWGetterCommandLine{Config: config.GetConfig(), Args: args} + delConfig := clusterresourceattribute.DefaultDelConfig + + // Get the project domain workflowName from the config file or commandline params + if len(delConfig.AttrFile) > 0 { + // Initialize TaskResourceAttrFileConfig which will be used if delConfig.AttrFile is non empty + // And Reads from the attribute file + pwdGetter = &clusterresourceattribute.AttrFileConfig{} + if err := sconfig.ReadConfigFromFile(pwdGetter, delConfig.AttrFile); err != nil { + return err + } + } + // Use the pwdGetter to initialize the project domain and workflow + project := pwdGetter.GetProject() + domain := pwdGetter.GetDomain() + workflowName := pwdGetter.GetWorkflow() + + // Deletes the matchable attributes using the taskResourceAttrFileConfig + if err := deleteMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminDeleterExt(), + admin.MatchableResource_CLUSTER_RESOURCE, delConfig.DryRun); err != nil { + return err + } + + return nil +} diff --git a/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go new file mode 100644 index 0000000000..37dd139475 --- /dev/null +++ 
b/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go @@ -0,0 +1,130 @@ +package delete + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func deleteClusterResourceAttributeSetup() { + clusterresourceattribute.DefaultDelConfig = &clusterresourceattribute.AttrDeleteConfig{} + args = []string{} +} + +func TestDeleteClusterResourceAttributes(t *testing.T) { + t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = "" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("failed project domain attribute deletion", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to delte project domain attributes")) + err := deleteClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delte project domain attributes"), err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, 
admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("successful project domain attribute deletion file", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_cluster_attribute.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("successful workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteClusterResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("failed workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(fmt.Errorf("failed to delete workflow attribute")) + err := deleteClusterResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete workflow attribute"), err) + 
s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("successful workflow attribute deletion file", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_cluster_attribute.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("workflow attribute deletion non existent file", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + s.DeleterExt.AssertNotCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_CLUSTER_RESOURCE) + }) + t.Run("attribute deletion invalid file", func(t *testing.T) { + s := setup() + deleteClusterResourceAttributeSetup() + // Empty attribute file + clusterresourceattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile + // No args implying project domain attribute deletion + err := deleteClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + 
assert.NotNil(t, err) + assert.Equal(t, + fmt.Errorf("error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\""), + err) + s.DeleterExt.AssertNotCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_CLUSTER_RESOURCE) + }) +} diff --git a/flytectl/cmd/delete/matchable_execution_cluster_label.go b/flytectl/cmd/delete/matchable_execution_cluster_label.go new file mode 100644 index 0000000000..544b8ae64f --- /dev/null +++ b/flytectl/cmd/delete/matchable_execution_cluster_label.go @@ -0,0 +1,75 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + executionClusterLabelShort = "Deletes matchable resources of execution cluster label." + executionClusterLabelLong = ` +Delete execution cluster label for a given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete execution-cluster-label -p flytesnacks -d development + +To delete execution cluster label using the config file that was used to create it, run: + +:: + + flytectl delete execution-cluster-label --attrFile ecl.yaml + +For example, here's the config file ecl.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + value: foo + +Value is optional in the file as it is unread during the delete command, but it can be retained since the same file can be used for 'get', 'update' and 'delete' commands. 
+ +To delete the execution cluster label of the workflow 'core.control_flow.merge_sort.merge_sort', run the following: + +:: + + flytectl delete execution-cluster-label -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage +` +) + +func deleteExecutionClusterLabel(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var pwdGetter sconfig.ProjectDomainWorkflowGetter + pwdGetter = sconfig.PDWGetterCommandLine{Config: config.GetConfig(), Args: args} + delConfig := executionclusterlabel.DefaultDelConfig + + // Get the project domain workflowName from the config file or commandline params + if len(delConfig.AttrFile) > 0 { + // Initialize FileConfig which will be used if delConfig.AttrFile is non empty + // And Reads from the cluster label file + pwdGetter = &executionclusterlabel.FileConfig{} + if err := sconfig.ReadConfigFromFile(pwdGetter, delConfig.AttrFile); err != nil { + return err + } + } + // Use the pwdGetter to initialize the project domain and workflow + project := pwdGetter.GetProject() + domain := pwdGetter.GetDomain() + workflowName := pwdGetter.GetWorkflow() + + // Deletes the matchable attributes using the ExecClusterLabelFileConfig + if err := deleteMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminDeleterExt(), + admin.MatchableResource_EXECUTION_CLUSTER_LABEL, delConfig.DryRun); err != nil { + return err + } + + return nil +} diff --git a/flytectl/cmd/delete/matchable_execution_cluster_label_test.go b/flytectl/cmd/delete/matchable_execution_cluster_label_test.go new file mode 100644 index 0000000000..f7cc5e7f53 --- /dev/null +++ b/flytectl/cmd/delete/matchable_execution_cluster_label_test.go @@ -0,0 +1,130 @@ +package delete + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" +) + +func deleteExecutionClusterLabelSetup() { + executionclusterlabel.DefaultDelConfig = &executionclusterlabel.AttrDeleteConfig{} + args = []string{} +} + +func TestDeleteExecutionClusterLabels(t *testing.T) { + t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = "" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteExecutionClusterLabel(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("failed project domain attribute deletion", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to delete project domain attributes")) + err := deleteExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete project domain attributes"), err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("successful project domain attribute deletion file", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_execution_cluster_label.yaml" + // No args implying project domain attribute deletion + 
s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("successful workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteExecutionClusterLabel(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("failed workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(fmt.Errorf("failed to delete workflow attribute")) + err := deleteExecutionClusterLabel(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete workflow attribute"), err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("successful workflow attribute deletion file", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = 
"testdata/valid_workflow_execution_cluster_label.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("workflow attribute deletion non existent file", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = testDataNonExistentFile + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + s.DeleterExt.AssertNotCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) + t.Run("attribute deletion invalid file", func(t *testing.T) { + s := setup() + deleteExecutionClusterLabelSetup() + // Empty attribute file + executionclusterlabel.DefaultDelConfig.AttrFile = testDataInvalidAttrFile + // No args implying project domain attribute deletion + err := deleteExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, + fmt.Errorf("error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\""), + err) + s.DeleterExt.AssertNotCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + }) +} diff --git a/flytectl/cmd/delete/matchable_execution_queue_attribute.go 
b/flytectl/cmd/delete/matchable_execution_queue_attribute.go new file mode 100644 index 0000000000..082cfec4af --- /dev/null +++ b/flytectl/cmd/delete/matchable_execution_queue_attribute.go @@ -0,0 +1,79 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + executionQueueAttributesShort = "Deletes matchable resources of execution queue attributes." + executionQueueAttributesLong = ` +Delete execution queue attributes for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete execution-queue-attribute -p flytesnacks -d development + +Delete execution queue attribute using the config file which was used to create it. + +:: + + flytectl delete execution-queue-attribute --attrFile era.yaml + +For example, here's the config file era.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + tags: + - foo + - bar + - buzz + - lightyear + +Value is optional in the file as it is unread during the delete command but it can be retained since the same file can be used for get, update and delete commands. 
+ +To delete the execution queue attribute for the workflow 'core.control_flow.merge_sort.merge_sort', run the following command: + +:: + + flytectl delete execution-queue-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage +` +) + +func deleteExecutionQueueAttributes(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var pwdGetter sconfig.ProjectDomainWorkflowGetter + pwdGetter = sconfig.PDWGetterCommandLine{Config: config.GetConfig(), Args: args} + delConfig := executionqueueattribute.DefaultDelConfig + + // Get the project domain workflowName from the config file or commandline params + if len(delConfig.AttrFile) > 0 { + // Initialize AttrFileConfig which will be used if delConfig.AttrFile is non empty + // And Reads from the attribute file + pwdGetter = &executionqueueattribute.AttrFileConfig{} + if err := sconfig.ReadConfigFromFile(pwdGetter, delConfig.AttrFile); err != nil { + return err + } + } + // Use the pwdGetter to initialize the project domain and workflow + project := pwdGetter.GetProject() + domain := pwdGetter.GetDomain() + workflowName := pwdGetter.GetWorkflow() + + // Deletes the matchable attributes using the AttrFileConfig + if err := deleteMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminDeleterExt(), + admin.MatchableResource_EXECUTION_QUEUE, delConfig.DryRun); err != nil { + return err + } + + return nil +} diff --git a/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go b/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go new file mode 100644 index 0000000000..734323d9f1 --- /dev/null +++ b/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go @@ -0,0 +1,130 @@ +package delete + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func deleteExecutionQueueAttributeSetup() { + executionqueueattribute.DefaultDelConfig = &executionqueueattribute.AttrDeleteConfig{} + args = []string{} +} + +func TestDeleteExecutionQueueAttributes(t *testing.T) { + t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { + s := setup() + deleteExecutionQueueAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = "" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("failed project domain attribute deletion", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to delte project domain attributes")) + err := deleteExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delte project domain attributes"), err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("successful project domain attribute deletion file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_execution_queue_attribute.yaml" + // No args implying project domain attribute 
deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("successful workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteExecutionQueueAttributes(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("failed workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(fmt.Errorf("failed to delete workflow attribute")) + err := deleteExecutionQueueAttributes(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete workflow attribute"), err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("successful workflow attribute deletion file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = 
"testdata/valid_workflow_execution_queue_attribute.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("workflow attribute deletion non existent file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + s.DeleterExt.AssertNotCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_EXECUTION_QUEUE) + }) + t.Run("attribute deletion invalid file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + executionqueueattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile + // No args implying project domain attribute deletion + err := deleteExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, + fmt.Errorf("error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\""), + err) + s.DeleterExt.AssertNotCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_QUEUE) + }) +} diff --git a/flytectl/cmd/delete/matchable_plugin_override.go 
b/flytectl/cmd/delete/matchable_plugin_override.go new file mode 100644 index 0000000000..76b90b0235 --- /dev/null +++ b/flytectl/cmd/delete/matchable_plugin_override.go @@ -0,0 +1,80 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + pluginOverrideShort = "Deletes matchable resources of plugin overrides." + pluginOverrideLong = ` +Delete plugin override for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete plugin-override -p flytesnacks -d development + + +To delete plugin override using the config file which was used to create it, run: +:: + + flytectl delete plugin-override --attrFile po.yaml + +For example, here's the config file po.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT + +Overrides are optional in the file as they are unread during the delete command but can be retained since the same file can be used for get, update and delete commands. 
+ +To delete plugin override for the workflow 'core.control_flow.merge_sort.merge_sort', run the following command: + +:: + + flytectl delete plugin-override -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage +` +) + +func deletePluginOverride(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var pwdGetter sconfig.ProjectDomainWorkflowGetter + pwdGetter = sconfig.PDWGetterCommandLine{Config: config.GetConfig(), Args: args} + delConfig := pluginoverride.DefaultDelConfig + + // Get the project domain workflowName from the config file or commandline params + if len(delConfig.AttrFile) > 0 { + // Initialize AttrFileConfig which will be used if delConfig.AttrFile is non empty + // And Reads from the attribute file + pwdGetter = &pluginoverride.FileConfig{} + if err := sconfig.ReadConfigFromFile(pwdGetter, delConfig.AttrFile); err != nil { + return err + } + } + // Use the pwdGetter to initialize the project domain and workflow + project := pwdGetter.GetProject() + domain := pwdGetter.GetDomain() + workflowName := pwdGetter.GetWorkflow() + + // Deletes the matchable attributes using the AttrFileConfig + if err := deleteMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminDeleterExt(), + admin.MatchableResource_PLUGIN_OVERRIDE, delConfig.DryRun); err != nil { + return err + } + + return nil +} diff --git a/flytectl/cmd/delete/matchable_plugin_override_test.go b/flytectl/cmd/delete/matchable_plugin_override_test.go new file mode 100644 index 0000000000..1dcd10cadc --- /dev/null +++ b/flytectl/cmd/delete/matchable_plugin_override_test.go @@ -0,0 +1,130 @@ +package delete + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func deletePluginOverrideSetup() { 
+ pluginoverride.DefaultDelConfig = &pluginoverride.AttrDeleteConfig{} + args = []string{} +} + +func TestPluginOverride(t *testing.T) { + t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = "" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deletePluginOverride(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("failed project domain attribute deletion", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to delte project domain attributes")) + err := deletePluginOverride(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delte project domain attributes"), err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("successful project domain attribute deletion file", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_plugin_override.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deletePluginOverride(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + 
s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("successful workflow attribute deletion", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deletePluginOverride(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("failed workflow attribute deletion", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(fmt.Errorf("failed to delete workflow attribute")) + err := deletePluginOverride(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete workflow attribute"), err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("successful workflow attribute deletion file", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = "testdata/valid_workflow_plugin_override.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deletePluginOverride(s.Ctx, []string{}, s.CmdCtx) + 
assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("workflow attribute deletion non existent file", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = testDataNonExistentFile + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deletePluginOverride(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + s.DeleterExt.AssertNotCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_PLUGIN_OVERRIDE) + }) + t.Run("attribute deletion invalid file", func(t *testing.T) { + s := setup() + deletePluginOverrideSetup() + // Empty attribute file + pluginoverride.DefaultDelConfig.AttrFile = testDataInvalidAttrFile + // No args implying project domain attribute deletion + err := deletePluginOverride(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, + fmt.Errorf("error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\""), + err) + s.DeleterExt.AssertNotCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_PLUGIN_OVERRIDE) + }) +} diff --git a/flytectl/cmd/delete/matchable_task_resource_attribute.go b/flytectl/cmd/delete/matchable_task_resource_attribute.go new file mode 100644 index 0000000000..3bd3f4d366 --- /dev/null +++ b/flytectl/cmd/delete/matchable_task_resource_attribute.go @@ -0,0 +1,80 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig 
"github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + taskResourceAttributesShort = "Deletes matchable resources of task attributes." + taskResourceAttributesLong = ` +Delete task resource attributes for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete task-resource-attribute -p flytesnacks -d development + +To delete task resource attribute using the config file which was used to create it, run: + +:: + + flytectl delete task-resource-attribute --attrFile tra.yaml + +For example, here's the config file tra.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +The defaults/limits are optional in the file as they are unread during the delete command, but can be retained since the same file can be used for 'get', 'update' and 'delete' commands. 
+ +To delete task resource attribute for the workflow 'core.control_flow.merge_sort.merge_sort', run the following command: + +:: + + flytectl delete task-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage +` +) + +func deleteTaskResourceAttributes(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var pwdGetter sconfig.ProjectDomainWorkflowGetter + pwdGetter = sconfig.PDWGetterCommandLine{Config: config.GetConfig(), Args: args} + delConfig := taskresourceattribute.DefaultDelConfig + + // Get the project domain workflowName from the config file or commandline params + if len(delConfig.AttrFile) > 0 { + // Initialize TaskResourceAttrFileConfig which will be used if delConfig.AttrFile is non empty + // And Reads from the attribute file + pwdGetter = &taskresourceattribute.TaskResourceAttrFileConfig{} + if err := sconfig.ReadConfigFromFile(pwdGetter, delConfig.AttrFile); err != nil { + return err + } + } + // Use the pwdGetter to initialize the project domain and workflow + project := pwdGetter.GetProject() + domain := pwdGetter.GetDomain() + workflowName := pwdGetter.GetWorkflow() + + // Deletes the matchable attributes using the taskResourceAttrFileConfig + if err := deleteMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminDeleterExt(), + admin.MatchableResource_TASK_RESOURCE, delConfig.DryRun); err != nil { + return err + } + + return nil +} diff --git a/flytectl/cmd/delete/matchable_task_resource_attribute_test.go b/flytectl/cmd/delete/matchable_task_resource_attribute_test.go new file mode 100644 index 0000000000..33744489b1 --- /dev/null +++ b/flytectl/cmd/delete/matchable_task_resource_attribute_test.go @@ -0,0 +1,130 @@ +package delete + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func deleteTaskResourceAttributeSetup() { + taskresourceattribute.DefaultDelConfig = &taskresourceattribute.AttrDeleteConfig{} + args = []string{} +} + +func TestDeleteTaskResourceAttributes(t *testing.T) { + t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = "" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("failed project domain attribute deletion", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to delte project domain attributes")) + err := deleteTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delte project domain attributes"), err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("successful project domain attribute deletion file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_task_attribute.yaml" + // No args implying project domain attribute deletion + 
s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("successful workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteTaskResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("failed workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(fmt.Errorf("failed to delete workflow attribute")) + err := deleteTaskResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete workflow attribute"), err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("successful workflow attribute deletion file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = 
"testdata/valid_workflow_task_attribute.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("workflow attribute deletion non existent file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + s.DeleterExt.AssertNotCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_TASK_RESOURCE) + }) + t.Run("attribute deletion invalid file", func(t *testing.T) { + s := setup() + deleteTaskResourceAttributeSetup() + // Empty attribute file + taskresourceattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile + // No args implying project domain attribute deletion + err := deleteTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, + fmt.Errorf("error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\""), + err) + s.DeleterExt.AssertNotCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_TASK_RESOURCE) + }) +} diff --git a/flytectl/cmd/delete/matchable_workflow_execution_config.go b/flytectl/cmd/delete/matchable_workflow_execution_config.go new 
file mode 100644 index 0000000000..a284ca6d0e --- /dev/null +++ b/flytectl/cmd/delete/matchable_workflow_execution_config.go @@ -0,0 +1,79 @@ +package delete + +import ( + "context" + + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + workflowExecutionConfigShort = "Deletes matchable resources of workflow execution config." + workflowExecutionConfigLong = ` +Delete workflow execution config for the given project and domain combination or additionally the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete workflow-execution-config -p flytesnacks -d development + +To delete workflow execution config using the config file which was used to create it, run: + +:: + + flytectl delete workflow-execution-config --attrFile wec.yaml + +For example, here's the config file wec.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + max_parallelism: 5 + security_context: + run_as: + k8s_service_account: demo + +Max_parallelism is optional in the file as it is unread during the delete command but can be retained since the same file can be used for get, update and delete commands. 
+ +To delete workflow execution config for the workflow 'core.control_flow.merge_sort.merge_sort', run: + +:: + + flytectl delete workflow-execution-config -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage +` +) + +func deleteWorkflowExecutionConfig(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var pwdGetter sconfig.ProjectDomainWorkflowGetter + pwdGetter = sconfig.PDWGetterCommandLine{Config: config.GetConfig(), Args: args} + delConfig := workflowexecutionconfig.DefaultDelConfig + + // Get the project domain workflowName from the config file or commandline params + if len(delConfig.AttrFile) > 0 { + // Initialize FileConfig which will be used if delConfig.AttrFile is non empty + // And Reads from the workflow execution config file + pwdGetter = &workflowexecutionconfig.FileConfig{} + if err := sconfig.ReadConfigFromFile(pwdGetter, delConfig.AttrFile); err != nil { + return err + } + } + // Use the pwdGetter to initialize the project domain and workflow + project := pwdGetter.GetProject() + domain := pwdGetter.GetDomain() + workflowName := pwdGetter.GetWorkflow() + + // Deletes the matchable attributes using the WorkflowExecutionConfigFileConfig + if err := deleteMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminDeleterExt(), + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, delConfig.DryRun); err != nil { + return err + } + + return nil +} diff --git a/flytectl/cmd/delete/matchable_workflow_execution_config_test.go b/flytectl/cmd/delete/matchable_workflow_execution_config_test.go new file mode 100644 index 0000000000..b3c41b09df --- /dev/null +++ b/flytectl/cmd/delete/matchable_workflow_execution_config_test.go @@ -0,0 +1,131 @@ +package delete + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func deleteWorkflowExecutionConfigSetup() { + workflowexecutionconfig.DefaultDelConfig = &workflowexecutionconfig.AttrDeleteConfig{} + args = []string{} +} + +func TestDeleteWorkflowExecutionConfig(t *testing.T) { + t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + workflowexecutionconfig.DefaultDelConfig.AttrFile = "" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteWorkflowExecutionConfig(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("failed project domain attribute deletion", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(fmt.Errorf("failed to delete project domain attributes")) + err := deleteWorkflowExecutionConfig(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete project domain attributes"), err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("successful project domain attribute deletion file", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + workflowexecutionconfig.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_workflow_execution_config.yaml" + // No args implying 
project domain attribute deletion + s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil) + err := deleteWorkflowExecutionConfig(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("successful workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + workflowexecutionconfig.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteWorkflowExecutionConfig(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("failed workflow attribute deletion", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + workflowexecutionconfig.DefaultDelConfig.AttrFile = "" + args := []string{"workflow1"} + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(fmt.Errorf("failed to delete workflow attribute")) + err := deleteWorkflowExecutionConfig(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to delete workflow attribute"), err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow1", + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("successful workflow attribute deletion file", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + 
workflowexecutionconfig.DefaultDelConfig.AttrFile = "testdata/valid_workflow_workflow_execution_config.yaml" + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteWorkflowExecutionConfig(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.DeleterExt.AssertCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("workflow attribute deletion non existent file", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + workflowexecutionconfig.DefaultDelConfig.AttrFile = testDataNonExistentFile + // No args implying project domain attribute deletion + s.DeleterExt.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil) + err := deleteWorkflowExecutionConfig(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + s.DeleterExt.AssertNotCalled(t, "DeleteWorkflowAttributes", + s.Ctx, "flytesnacks", "development", "core.control_flow.merge_sort.merge_sort", + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) + t.Run("attribute deletion invalid file", func(t *testing.T) { + s := setup() + deleteWorkflowExecutionConfigSetup() + // Empty attribute file + workflowexecutionconfig.DefaultDelConfig.AttrFile = testDataInvalidAttrFile + // No args implying project domain attribute deletion + err := deleteWorkflowExecutionConfig(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, + fmt.Errorf("error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\""), + err) + s.DeleterExt.AssertNotCalled(t, "DeleteProjectDomainAttributes", + s.Ctx, "flytesnacks", "development", admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + }) +} diff --git 
a/flytectl/cmd/delete/testdata/invalid_attribute.yaml b/flytectl/cmd/delete/testdata/invalid_attribute.yaml new file mode 100644 index 0000000000..1e7868c1e2 --- /dev/null +++ b/flytectl/cmd/delete/testdata/invalid_attribute.yaml @@ -0,0 +1,5 @@ +InvalidDomain: development +InvalidProject: flytesnacks +InvalidWorkflow: "" +cpu: "1" +memory: 150Mi \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_project_domain_cluster_attribute.yaml b/flytectl/cmd/delete/testdata/valid_project_domain_cluster_attribute.yaml new file mode 100644 index 0000000000..586fe522f3 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_project_domain_cluster_attribute.yaml @@ -0,0 +1,5 @@ +domain: development +project: flytesnacks +attributes: + "foo": "bar" + "buzz": "lightyear" \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_project_domain_execution_cluster_label.yaml b/flytectl/cmd/delete/testdata/valid_project_domain_execution_cluster_label.yaml new file mode 100644 index 0000000000..afade68509 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_project_domain_execution_cluster_label.yaml @@ -0,0 +1,3 @@ +domain: development +project: flytesnacks +value: foo \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_project_domain_execution_queue_attribute.yaml b/flytectl/cmd/delete/testdata/valid_project_domain_execution_queue_attribute.yaml new file mode 100644 index 0000000000..1620c65762 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_project_domain_execution_queue_attribute.yaml @@ -0,0 +1,7 @@ +domain: development +project: flytesnacks +tags: + - foo + - bar + - buzz + - lightyear \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_project_domain_plugin_override.yaml b/flytectl/cmd/delete/testdata/valid_project_domain_plugin_override.yaml new file mode 100644 index 0000000000..9749e17100 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_project_domain_plugin_override.yaml 
@@ -0,0 +1,8 @@ +domain: development +project: flytesnacks +overrides: + - task_type: python_task + plugin_id: + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # 0 : FAIL , 1: DEFAULT diff --git a/flytectl/cmd/delete/testdata/valid_project_domain_task_attribute.yaml b/flytectl/cmd/delete/testdata/valid_project_domain_task_attribute.yaml new file mode 100644 index 0000000000..cd1a5c9abc --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_project_domain_task_attribute.yaml @@ -0,0 +1,8 @@ +domain: development +project: flytesnacks +defaults: + cpu: "1" + memory: 150Mi +limits: + cpu: "2" + memory: 450Mi \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_project_domain_workflow_execution_config.yaml b/flytectl/cmd/delete/testdata/valid_project_domain_workflow_execution_config.yaml new file mode 100644 index 0000000000..84b87197a1 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_project_domain_workflow_execution_config.yaml @@ -0,0 +1,3 @@ +domain: development +project: flytesnacks +max_parallelism: 5 \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_workflow_cluster_attribute.yaml b/flytectl/cmd/delete/testdata/valid_workflow_cluster_attribute.yaml new file mode 100644 index 0000000000..e4030e455d --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_workflow_cluster_attribute.yaml @@ -0,0 +1,6 @@ +Domain: development +Project: flytesnacks +Workflow: core.control_flow.merge_sort.merge_sort +attributes: + "foo": "bar" + "buzz": "lightyear" \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_workflow_execution_cluster_label.yaml b/flytectl/cmd/delete/testdata/valid_workflow_execution_cluster_label.yaml new file mode 100644 index 0000000000..068cbe9926 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_workflow_execution_cluster_label.yaml @@ -0,0 +1,4 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +value: foo \ 
No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_workflow_execution_queue_attribute.yaml b/flytectl/cmd/delete/testdata/valid_workflow_execution_queue_attribute.yaml new file mode 100644 index 0000000000..d8952b1a6c --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_workflow_execution_queue_attribute.yaml @@ -0,0 +1,8 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +tags: + - foo + - bar + - buzz + - lightyear \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_workflow_plugin_override.yaml b/flytectl/cmd/delete/testdata/valid_workflow_plugin_override.yaml new file mode 100644 index 0000000000..5b35e23e31 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_workflow_plugin_override.yaml @@ -0,0 +1,9 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +overrides: + - task_type: python_task + plugin_id: + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # 0 : FAIL , 1: DEFAULT \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_workflow_task_attribute.yaml b/flytectl/cmd/delete/testdata/valid_workflow_task_attribute.yaml new file mode 100644 index 0000000000..7c22207689 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_workflow_task_attribute.yaml @@ -0,0 +1,9 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +defaults: + cpu: "2" + memory: 250Mi +limits: + cpu: "3" + memory: 350Mi \ No newline at end of file diff --git a/flytectl/cmd/delete/testdata/valid_workflow_workflow_execution_config.yaml b/flytectl/cmd/delete/testdata/valid_workflow_workflow_execution_config.yaml new file mode 100644 index 0000000000..e4f6ec0049 --- /dev/null +++ b/flytectl/cmd/delete/testdata/valid_workflow_workflow_execution_config.yaml @@ -0,0 +1,4 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort 
+max_parallelism: 5 \ No newline at end of file diff --git a/flytectl/cmd/demo/demo.go b/flytectl/cmd/demo/demo.go new file mode 100644 index 0000000000..a26b06e657 --- /dev/null +++ b/flytectl/cmd/demo/demo.go @@ -0,0 +1,74 @@ +package demo + +import ( + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + "github.com/spf13/cobra" +) + +const ( + flyteNs = "flyte" + K8sEndpoint = "https://127.0.0.1:6443" +) + +// Long descriptions are whitespace sensitive when generating docs using sphinx. +const ( + demoShort = `Helps with demo interactions like start, teardown, status, and exec.` + demoLong = ` +Flyte Demo is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte demo as a single Docker container locally. + +To create a demo cluster, run: +:: + + flytectl demo start + +To remove a demo cluster, run: +:: + + flytectl demo teardown + +To check the status of the demo container, run: +:: + + flytectl demo status + +To execute commands inside the demo container, use exec: +:: + + flytectl demo exec -- pwd +` +) + +// CreateDemoCommand will return demo command +func CreateDemoCommand() *cobra.Command { + demo := &cobra.Command{ + Use: "demo", + Short: demoShort, + Long: demoLong, + } + + demoResourcesFuncs := map[string]cmdcore.CommandEntry{ + "start": {CmdFunc: startDemoCluster, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: startShort, + Long: startLong, PFlagProvider: sandboxCmdConfig.DefaultConfig, DisableFlyteClient: true}, + "reload": {CmdFunc: reloadDemoCluster, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: reloadShort, + Long: reloadLong, PFlagProvider: sandboxCmdConfig.DefaultConfig, DisableFlyteClient: true}, + "teardown": {CmdFunc: teardownDemoCluster, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: teardownShort, + Long: teardownLong, + PFlagProvider: 
sandboxCmdConfig.DefaultTeardownFlags, + DisableFlyteClient: true}, + "status": {CmdFunc: demoClusterStatus, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: statusShort, + Long: statusLong}, + "exec": {CmdFunc: demoClusterExec, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: execShort, + Long: execLong, DisableFlyteClient: true}, + } + + cmdcore.AddCommands(demo, demoResourcesFuncs) + + return demo +} diff --git a/flytectl/cmd/demo/demo_test.go b/flytectl/cmd/demo/demo_test.go new file mode 100644 index 0000000000..fdc2c09b56 --- /dev/null +++ b/flytectl/cmd/demo/demo_test.go @@ -0,0 +1,43 @@ +package demo + +import ( + "fmt" + "sort" + "testing" + + "gotest.tools/assert" +) + +func TestCreateDemoCommand(t *testing.T) { + demoCommand := CreateDemoCommand() + assert.Equal(t, demoCommand.Use, "demo") + assert.Equal(t, demoCommand.Short, "Helps with demo interactions like start, teardown, status, and exec.") + fmt.Println(demoCommand.Commands()) + + assert.Equal(t, len(demoCommand.Commands()), 5) + cmdNouns := demoCommand.Commands() + // Sort by Use value. 
+ sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + + assert.Equal(t, cmdNouns[0].Use, "exec") + assert.Equal(t, cmdNouns[0].Short, execShort) + assert.Equal(t, cmdNouns[0].Long, execLong) + + assert.Equal(t, cmdNouns[1].Use, "reload") + assert.Equal(t, cmdNouns[1].Short, reloadShort) + assert.Equal(t, cmdNouns[1].Long, reloadLong) + + assert.Equal(t, cmdNouns[2].Use, "start") + assert.Equal(t, cmdNouns[2].Short, startShort) + assert.Equal(t, cmdNouns[2].Long, startLong) + + assert.Equal(t, cmdNouns[3].Use, "status") + assert.Equal(t, cmdNouns[3].Short, statusShort) + assert.Equal(t, cmdNouns[3].Long, statusLong) + + assert.Equal(t, cmdNouns[4].Use, "teardown") + assert.Equal(t, cmdNouns[4].Short, teardownShort) + assert.Equal(t, cmdNouns[4].Long, teardownLong) +} diff --git a/flytectl/cmd/demo/exec.go b/flytectl/cmd/demo/exec.go new file mode 100644 index 0000000000..1cda6d0e49 --- /dev/null +++ b/flytectl/cmd/demo/exec.go @@ -0,0 +1,50 @@ +package demo + +import ( + "context" + "fmt" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/docker" +) + +const ( + execShort = "Executes non-interactive command inside the demo container" + execLong = ` +Run non-interactive commands inside the demo container and immediately return the output. +By default, "flytectl exec" is present in the /root directory inside the demo container. + +:: + + flytectl demo exec -- ls -al + +Usage` +) + +func demoClusterExec(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + if len(args) > 0 { + return execute(ctx, cli, args) + } + return fmt.Errorf("missing argument. 
Please check usage examples by running flytectl demo exec --help") +} + +func execute(ctx context.Context, cli docker.Docker, args []string) error { + c, err := docker.GetSandbox(ctx, cli) + if err != nil { + return err + } + if c != nil { + exec, err := docker.ExecCommend(ctx, cli, c.ID, args) + if err != nil { + return err + } + if err := docker.InspectExecResp(ctx, cli, exec.ID); err != nil { + return err + } + } + return nil +} diff --git a/flytectl/cmd/demo/exec_test.go b/flytectl/cmd/demo/exec_test.go new file mode 100644 index 0000000000..a842e39669 --- /dev/null +++ b/flytectl/cmd/demo/exec_test.go @@ -0,0 +1,76 @@ +package demo + +import ( + "bufio" + "context" + "fmt" + "io" + "strings" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + admin2 "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/stretchr/testify/mock" +) + +func TestDemoClusterExec(t *testing.T) { + mockDocker := &mocks.Docker{} + mockOutStream := new(io.Writer) + ctx := context.Background() + mockClient := admin2.InitializeMockClientset() + cmdCtx := cmdCore.NewCommandContext(mockClient, *mockOutStream) + reader := bufio.NewReader(strings.NewReader("test")) + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + docker.ExecConfig.Cmd = []string{"ls -al"} + mockDocker.OnContainerExecCreateMatch(ctx, mock.Anything, docker.ExecConfig).Return(types.IDResponse{}, nil) + mockDocker.OnContainerExecInspectMatch(ctx, mock.Anything).Return(types.ContainerExecInspect{}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, mock.Anything, types.ExecStartCheck{}).Return(types.HijackedResponse{ + 
Reader: reader, + }, fmt.Errorf("Test")) + docker.Client = mockDocker + err := demoClusterExec(ctx, []string{"ls -al"}, cmdCtx) + + assert.NotNil(t, err) +} + +func TestSandboxClusterExecWithoutCmd(t *testing.T) { + mockDocker := &mocks.Docker{} + reader := bufio.NewReader(strings.NewReader("test")) + s := testutils.Setup() + ctx := s.Ctx + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + docker.ExecConfig.Cmd = []string{} + mockDocker.OnContainerExecCreateMatch(ctx, mock.Anything, docker.ExecConfig).Return(types.IDResponse{}, nil) + mockDocker.OnContainerExecInspectMatch(ctx, mock.Anything).Return(types.ContainerExecInspect{}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, mock.Anything, types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: reader, + }, fmt.Errorf("Test")) + docker.Client = mockDocker + err := demoClusterExec(ctx, []string{}, s.CmdCtx) + + assert.NotNil(t, err) +} diff --git a/flytectl/cmd/demo/reload.go b/flytectl/cmd/demo/reload.go new file mode 100644 index 0000000000..05c1ccfd97 --- /dev/null +++ b/flytectl/cmd/demo/reload.go @@ -0,0 +1,119 @@ +package demo + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flytestdlib/logger" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/k8s" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + internalBootstrapAgent = "flyte-sandbox-bootstrap" + labelSelector = "app.kubernetes.io/name=flyte-binary" +) +const ( + reloadShort = "Power cycle the Flyte executable pod, effectively picking up an updated config." 
+ reloadLong = ` +If you've changed the ~/.flyte/state/flyte.yaml file, run this command to restart the Flyte binary pod, effectively +picking up the new settings: + +Usage +:: + + flytectl demo reload + +` +) + +func isLegacySandbox(ctx context.Context, cli docker.Docker, containerID string) (bool, error) { + var result bool + + // Check if sandbox is compatible with new bootstrap mechanism + exec, err := docker.ExecCommend( + ctx, + cli, + containerID, + []string{"sh", "-c", fmt.Sprintf("which %s > /dev/null", internalBootstrapAgent)}, + ) + if err != nil { + return result, err + } + if err = docker.InspectExecResp(ctx, cli, exec.ID); err != nil { + return result, err + } + res, err := cli.ContainerExecInspect(ctx, exec.ID) + if err != nil { + return result, err + } + + result = res.ExitCode != 0 + return result, nil +} + +func reloadDemoCluster(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + c, err := docker.GetSandbox(ctx, cli) + if err != nil { + return err + } + if c == nil { + return fmt.Errorf("reload failed - could not find an active sandbox") + } + + // Working with a legacy sandbox - fallback to legacy reload mechanism + useLegacyMethod, err := isLegacySandbox(ctx, cli, c.ID) + if err != nil { + return err + } + if useLegacyMethod { + return legacyReloadDemoCluster(ctx) + } + + // At this point we know that we are on a modern sandbox, and we can use the + // internal bootstrap agent to reload the cluster + exec, err := docker.ExecCommend(ctx, cli, c.ID, []string{internalBootstrapAgent}) + if err != nil { + return err + } + if err = docker.InspectExecResp(ctx, cli, exec.ID); err != nil { + return err + } + + return nil +} + +// legacyReloadDemoCluster will kill the flyte binary pod so the new one can pick up a new config file +func legacyReloadDemoCluster(ctx context.Context) error { + k8sClient, err := k8s.GetK8sClient(docker.Kubeconfig, K8sEndpoint) + 
if err != nil { + fmt.Println("Could not get K8s client") + return err + } + pi := k8sClient.CoreV1().Pods(flyteNs) + podList, err := pi.List(ctx, v1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + fmt.Println("could not list pods") + return err + } + if len(podList.Items) != 1 { + return fmt.Errorf("should only have one pod running, %d found, %v", len(podList.Items), podList.Items) + } + logger.Debugf(ctx, "Found %d pods\n", len(podList.Items)) + var grace = int64(0) + err = pi.Delete(ctx, podList.Items[0].Name, v1.DeleteOptions{ + GracePeriodSeconds: &grace, + }) + if err != nil { + fmt.Printf("Could not delete Flyte pod, old configuration may still be in effect. Err: %s\n", err) + return err + } + + return nil +} diff --git a/flytectl/cmd/demo/reload_test.go b/flytectl/cmd/demo/reload_test.go new file mode 100644 index 0000000000..ef50033dc7 --- /dev/null +++ b/flytectl/cmd/demo/reload_test.go @@ -0,0 +1,119 @@ +package demo + +import ( + "bufio" + "bytes" + "context" + "fmt" + "testing" + + "github.com/docker/docker/api/types" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/flyteorg/flytectl/pkg/k8s" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testclient "k8s.io/client-go/kubernetes/fake" +) + +var fakePod = corev1.Pod{ + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{}, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "dummyflytepod", + Labels: map[string]string{"app.kubernetes.io/name": "flyte-binary"}, + }, +} + +func sandboxSetup(ctx context.Context, legacy bool) { + mockDocker := &mocks.Docker{} + docker.Client = mockDocker + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: 
[]string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + + // This first set of mocks is for the check for the bootstrap agent. This is + // Expected to fail in legacy sandboxes + var checkLegacySandboxExecExitCode int + if legacy { + checkLegacySandboxExecExitCode = 1 + } + mockDocker.OnContainerExecCreateMatch( + ctx, + docker.FlyteSandboxClusterName, + types.ExecConfig{ + AttachStderr: true, + Tty: true, + WorkingDir: "/", + AttachStdout: true, + Cmd: []string{"sh", "-c", fmt.Sprintf("which %s > /dev/null", internalBootstrapAgent)}, + }, + ).Return(types.IDResponse{ID: "0"}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, "0", types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: bufio.NewReader(bytes.NewReader([]byte{})), + }, nil) + mockDocker.OnContainerExecInspectMatch(ctx, "0").Return(types.ContainerExecInspect{ExitCode: checkLegacySandboxExecExitCode}, nil) + + // Register additional mocks for the actual execution of the bootstrap agent + // in non-legacy sandboxes + if !legacy { + mockDocker.OnContainerExecCreateMatch( + ctx, + docker.FlyteSandboxClusterName, + types.ExecConfig{ + AttachStderr: true, + Tty: true, + WorkingDir: "/", + AttachStdout: true, + Cmd: []string{internalBootstrapAgent}, + }, + ).Return(types.IDResponse{ID: "1"}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, "1", types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: bufio.NewReader(bytes.NewReader([]byte{})), + }, nil) + } +} + +func TestReloadLegacy(t *testing.T) { + ctx := context.Background() + commandCtx := cmdCore.CommandContext{} + sandboxSetup(ctx, false) + err := reloadDemoCluster(ctx, []string{}, commandCtx) + assert.Nil(t, err) +} + +func TestDemoReloadLegacy(t *testing.T) { + ctx := context.Background() + commandCtx := cmdCore.CommandContext{} + sandboxSetup(ctx, true) + t.Run("No errors", func(t *testing.T) { + client := testclient.NewSimpleClientset() + _, err := client.CoreV1().Pods("flyte").Create(ctx, &fakePod, v1.CreateOptions{}) 
+ assert.NoError(t, err) + k8s.Client = client + err = reloadDemoCluster(ctx, []string{}, commandCtx) + assert.NoError(t, err) + }) + + t.Run("Multiple pods will error", func(t *testing.T) { + client := testclient.NewSimpleClientset() + _, err := client.CoreV1().Pods("flyte").Create(ctx, &fakePod, v1.CreateOptions{}) + assert.NoError(t, err) + fakePod.SetName("othername") + _, err = client.CoreV1().Pods("flyte").Create(ctx, &fakePod, v1.CreateOptions{}) + assert.NoError(t, err) + k8s.Client = client + err = reloadDemoCluster(ctx, []string{}, commandCtx) + assert.Errorf(t, err, "should only have one pod") + }) +} diff --git a/flytectl/cmd/demo/start.go b/flytectl/cmd/demo/start.go new file mode 100644 index 0000000000..c542d615ae --- /dev/null +++ b/flytectl/cmd/demo/start.go @@ -0,0 +1,97 @@ +package demo + +import ( + "context" + + "github.com/flyteorg/flytectl/pkg/docker" + + "github.com/flyteorg/flytectl/pkg/sandbox" + + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + startShort = "Starts the Flyte demo cluster." + startLong = ` +Flyte demo is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte demo as a single Docker container locally. + +Starts the demo cluster without any source code: +:: + + flytectl demo start + +Runs a dev cluster, which only has minio and postgres pod. +:: + + flytectl demo start --dev + +Mounts your source code repository inside the demo cluster: +:: + + flytectl demo start --source=$HOME/flyteorg/flytesnacks + +Specify a Flyte demo compliant image with the registry. This is useful in case you want to use an image from your registry. +:: + + flytectl demo start --image docker.io/my-override:latest + +Note: If image flag is passed then Flytectl will ignore version and pre flags. + +Specify a Flyte demo image pull policy. 
Possible pull policy values are Always, IfNotPresent, or Never: +:: + + flytectl demo start --image docker.io/my-override:latest --imagePullPolicy Always + +Runs a specific version of Flyte. Flytectl demo only supports Flyte version available in the Github release, https://github.com/flyteorg/flyte/tags. +:: + + flytectl demo start --version=v0.14.0 + +.. note:: + Flytectl demo is only supported for Flyte versions >= v1.0.0 + +Runs the latest pre release of Flyte. +:: + + flytectl demo start --pre + +Start demo cluster passing environment variables. This can be used to pass docker specific env variables or flyte specific env variables. +eg : for passing timeout value in secs for the demo container use the following. +:: + + flytectl demo start --env FLYTE_TIMEOUT=700 + +The DURATION can be a positive integer or a floating-point number, followed by an optional unit suffix:: +s - seconds (default) +m - minutes +h - hours +d - days +When no unit is used, it defaults to seconds. If the duration is set to zero, the associated timeout is disabled. 
+ + +eg : for passing multiple environment variables +:: + + flytectl demo start --env USER=foo --env PASSWORD=bar + + +For just printing the docker commands for bringing up the demo container +:: + + flytectl demo start --dryRun + +Usage +` +) + +func startDemoCluster(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cfg := sandboxCmdConfig.DefaultConfig + err := cfg.ImagePullPolicy.Set(docker.ImagePullPolicyIfNotPresent.String()) + if err != nil { + return err + } + + return sandbox.StartDemoCluster(ctx, args, cfg) +} diff --git a/flytectl/cmd/demo/start_test.go b/flytectl/cmd/demo/start_test.go new file mode 100644 index 0000000000..bed5a16777 --- /dev/null +++ b/flytectl/cmd/demo/start_test.go @@ -0,0 +1 @@ +package demo diff --git a/flytectl/cmd/demo/status.go b/flytectl/cmd/demo/status.go new file mode 100644 index 0000000000..7b34bed7e5 --- /dev/null +++ b/flytectl/cmd/demo/status.go @@ -0,0 +1,32 @@ +package demo + +import ( + "context" + + "github.com/flyteorg/flytectl/pkg/sandbox" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/docker" +) + +const ( + statusShort = "Gets the status of the demo environment." + statusLong = ` +Retrieves the status of the demo environment. Currently, Flyte demo runs as a local Docker container. 
+ +Usage +:: + + flytectl demo status + +` +) + +func demoClusterStatus(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + + return sandbox.PrintStatus(ctx, cli) +} diff --git a/flytectl/cmd/demo/status_test.go b/flytectl/cmd/demo/status_test.go new file mode 100644 index 0000000000..7fae8bc43a --- /dev/null +++ b/flytectl/cmd/demo/status_test.go @@ -0,0 +1,39 @@ +package demo + +import ( + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/stretchr/testify/assert" +) + +func TestDemoStatus(t *testing.T) { + t.Run("Demo status with zero result", func(t *testing.T) { + mockDocker := &mocks.Docker{} + s := testutils.Setup() + mockDocker.OnContainerList(s.Ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + docker.Client = mockDocker + err := demoClusterStatus(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("Demo status with running", func(t *testing.T) { + s := testutils.Setup() + ctx := s.Ctx + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + docker.Client = mockDocker + err := demoClusterStatus(ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + }) +} diff --git a/flytectl/cmd/demo/teardown.go b/flytectl/cmd/demo/teardown.go new file mode 100644 index 0000000000..2fc59769be --- /dev/null +++ b/flytectl/cmd/demo/teardown.go @@ -0,0 +1,32 @@ +package demo + +import ( + "context" + + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/sandbox" + + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + cmdCore 
"github.com/flyteorg/flytectl/cmd/core" +) + +const ( + teardownShort = "Cleans up the demo environment" + teardownLong = ` +Removes the demo cluster and all the Flyte config created by 'demo start': +:: + + flytectl demo teardown + + +Usage +` +) + +func teardownDemoCluster(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + return sandbox.Teardown(ctx, cli, sandboxCmdConfig.DefaultTeardownFlags) +} diff --git a/flytectl/cmd/demo/teardown_test.go b/flytectl/cmd/demo/teardown_test.go new file mode 100644 index 0000000000..cfe8bcfea7 --- /dev/null +++ b/flytectl/cmd/demo/teardown_test.go @@ -0,0 +1,90 @@ +package demo + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/api/types" + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/configutil" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/flyteorg/flytectl/pkg/k8s" + k8sMocks "github.com/flyteorg/flytectl/pkg/k8s/mocks" + "github.com/flyteorg/flytectl/pkg/sandbox" + "github.com/flyteorg/flytectl/pkg/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var containers []types.Container + +func TestTearDownFunc(t *testing.T) { + container1 := types.Container{ + ID: "FlyteSandboxClusterName", + Names: []string{ + docker.FlyteSandboxClusterName, + }, + } + containers = append(containers, container1) + + t.Run("SuccessKeepVolume", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + mockK8sContextMgr := &k8sMocks.ContextOps{} + k8s.ContextMgr = mockK8sContextMgr + 
mockK8sContextMgr.OnRemoveContextMatch(mock.Anything).Return(nil) + err := sandbox.Teardown(ctx, mockDocker, sandboxCmdConfig.DefaultTeardownFlags) + assert.Nil(t, err) + }) + t.Run("SuccessRemoveVolume", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + mockDocker.OnVolumeRemove(ctx, docker.FlyteSandboxVolumeName, true).Return(nil) + mockK8sContextMgr := &k8sMocks.ContextOps{} + k8s.ContextMgr = mockK8sContextMgr + mockK8sContextMgr.OnRemoveContextMatch(mock.Anything).Return(nil) + err := sandbox.Teardown( + ctx, + mockDocker, + &sandboxCmdConfig.TeardownFlags{Volume: true}, + ) + assert.Nil(t, err) + }) + t.Run("ErrorOnContainerRemove", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(fmt.Errorf("err")) + err := sandbox.Teardown(ctx, mockDocker, sandboxCmdConfig.DefaultTeardownFlags) + assert.NotNil(t, err) + }) + + t.Run("ErrorOnContainerList", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(nil, fmt.Errorf("err")) + err := sandbox.Teardown(ctx, mockDocker, sandboxCmdConfig.DefaultTeardownFlags) + assert.NotNil(t, err) + }) + +} + +func TestTearDownClusterFunc(t *testing.T) { + _ = util.SetupFlyteDir() + _ = util.WriteIntoFile([]byte("data"), configutil.FlytectlConfig) + s := testutils.Setup() + ctx := s.Ctx + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, 
types.ContainerRemoveOptions{Force: true}).Return(nil) + docker.Client = mockDocker + err := teardownDemoCluster(ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) +} diff --git a/flytectl/cmd/get/execution.go b/flytectl/cmd/get/execution.go new file mode 100644 index 0000000000..da597c5187 --- /dev/null +++ b/flytectl/cmd/get/execution.go @@ -0,0 +1,170 @@ +package get + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + + "github.com/flyteorg/flytectl/pkg/bubbletea" + "github.com/flyteorg/flytectl/pkg/filters" + "github.com/flyteorg/flytectl/pkg/printer" + + "github.com/golang/protobuf/proto" +) + +const ( + executionShort = "Gets execution resources." + executionLong = ` +Retrieve all executions within the project and domain. +:: + + flytectl get execution -p flytesnacks -d development + +.. note:: + The terms execution/executions are interchangeable in these commands. + +Retrieve executions by name within the project and domain. +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r + +Retrieve all the executions with filters. +:: + + flytectl get execution -p flytesnacks -d development --filter.fieldSelector="execution.phase in (FAILED;SUCCEEDED),execution.duration<200" + + +Retrieve executions as per the specified limit and sorting parameters. +:: + + flytectl get execution -p flytesnacks -d development --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve executions present in other pages by specifying the limit and page number. + +:: + + flytectl get -p flytesnacks -d development execution --filter.limit=10 --filter.page=2 + +Retrieve executions within the project and domain in YAML format. 
+ +:: + + flytectl get execution -p flytesnacks -d development -o yaml + +Retrieve executions within the project and domain in JSON format. + +:: + + flytectl get execution -p flytesnacks -d development -o json + + +Get more details of the execution using the --details flag, which shows node and task executions. +The default view is a tree view, and the TABLE view format is not supported on this view. + +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --details + +Fetch execution details in YAML format. In this view, only node details are available. For task, pass the --nodeID flag. +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --details -o yaml + +Fetch task executions on a specific node using the --nodeID flag. Use the nodeID attribute given by the node details view. + +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --nodeID n0 + +Task execution view is available in YAML/JSON format too. The following example showcases YAML, where the output contains input and output data of each node. 
+ +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --nodeID n0 -o yaml + +Usage +` +) + +var hundredChars = 100 + +var executionColumns = []printer.Column{ + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Launch Plan Name", JSONPath: "$.spec.launchPlan.name"}, + {Header: "Version", JSONPath: "$.spec.launchPlan.version"}, + {Header: "Type", JSONPath: "$.spec.launchPlan.resourceType"}, + {Header: "Phase", JSONPath: "$.closure.phase"}, + {Header: "Scheduled Time", JSONPath: "$.spec.metadata.scheduledAt"}, + {Header: "Started", JSONPath: "$.closure.startedAt"}, + {Header: "Elapsed Time", JSONPath: "$.closure.duration"}, + {Header: "Abort data (Trunc)", JSONPath: "$.closure.abortMetadata[\"cause\"]", TruncateTo: &hundredChars}, + {Header: "Error data (Trunc)", JSONPath: "$.closure.error[\"message\"]", TruncateTo: &hundredChars}, +} + +func ExecutionToProtoMessages(l []*admin.Execution) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func getCallBack(ctx context.Context, cmdCtx cmdCore.CommandContext) bubbletea.DataCallback { + return func(filter filters.Filters) []proto.Message { + executionList, err := cmdCtx.AdminFetcherExt().ListExecution(ctx, config.GetConfig().Project, config.GetConfig().Domain, filter) + if err != nil { + return nil + } + return ExecutionToProtoMessages(executionList.Executions) + } +} + +func getExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + adminPrinter := printer.Printer{} + var executions []*admin.Execution + if len(args) > 0 { + name := args[0] + exec, err := cmdCtx.AdminFetcherExt().FetchExecution(ctx, name, config.GetConfig().Project, config.GetConfig().Domain) + if err != nil { + return err + } + executions = append(executions, exec) + logger.Infof(ctx, "Retrieved %v executions", len(executions)) + + if execution.DefaultConfig.Details || 
len(execution.DefaultConfig.NodeID) > 0 { + // Fetching Node execution details + nExecDetailsForView, err := getExecutionDetails(ctx, config.GetConfig().Project, config.GetConfig().Domain, name, execution.DefaultConfig.NodeID, cmdCtx) + if err != nil { + return err + } + // o/p format of table is not supported on the details. TODO: Add tree format in printer + if config.GetConfig().MustOutputFormat() == printer.OutputFormatTABLE { + fmt.Println("TABLE format is not supported on detailed view and defaults to tree view. Choose either json/yaml") + nodeExecTree := createNodeDetailsTreeView(nil, nExecDetailsForView) + fmt.Println(nodeExecTree.Print()) + return nil + } + return adminPrinter.PrintInterface(config.GetConfig().MustOutputFormat(), nodeExecutionColumns, nExecDetailsForView) + } + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), executionColumns, + ExecutionToProtoMessages(executions)...) + } + executionList, err := cmdCtx.AdminFetcherExt().ListExecution(ctx, config.GetConfig().Project, config.GetConfig().Domain, execution.DefaultConfig.Filter) + if err != nil { + return err + } + logger.Infof(ctx, "Retrieved %v executions", len(executionList.Executions)) + + if config.GetConfig().Interactive { + bubbletea.Paginator(executionColumns, getCallBack(ctx, cmdCtx)) + return nil + } + + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), executionColumns, + ExecutionToProtoMessages(executionList.Executions)...) 
+} diff --git a/flytectl/cmd/get/execution_test.go b/flytectl/cmd/get/execution_test.go new file mode 100644 index 0000000000..635ccce875 --- /dev/null +++ b/flytectl/cmd/get/execution_test.go @@ -0,0 +1,364 @@ +package get + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + nodeID = "node-id" +) + +func getExecutionSetup() { + config.GetConfig().Project = projectValue + config.GetConfig().Domain = domainValue + config.GetConfig().Output = output + execution.DefaultConfig.Details = false + execution.DefaultConfig.NodeID = "" +} + +func TestListExecutionFunc(t *testing.T) { + getExecutionSetup() + s := setup() + executionResponse := &admin.Execution{ + Id: &core.WorkflowExecutionIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: executionNameValue, + }, + Spec: &admin.ExecutionSpec{ + LaunchPlan: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: launchPlanNameValue, + Version: launchPlanVersionValue, + }, + }, + Closure: &admin.ExecutionClosure{ + WorkflowId: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: workflowNameValue, + Version: workflowVersionValue, + }, + Phase: core.WorkflowExecution_SUCCEEDED, + }, + } + executions := []*admin.Execution{executionResponse} + executionList := &admin.ExecutionList{ + Executions: executions, + } + s.FetcherExt.OnListExecutionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(executionList, nil) + err := getExecutionFunc(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "ListExecution", s.Ctx, projectValue, domainValue, 
execution.DefaultConfig.Filter) +} + +func TestListExecutionFuncWithError(t *testing.T) { + getExecutionSetup() + _ = &admin.Execution{ + Id: &core.WorkflowExecutionIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: executionNameValue, + }, + Spec: &admin.ExecutionSpec{ + LaunchPlan: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: launchPlanNameValue, + Version: launchPlanVersionValue, + }, + }, + Closure: &admin.ExecutionClosure{ + WorkflowId: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: workflowNameValue, + Version: workflowVersionValue, + }, + Phase: core.WorkflowExecution_SUCCEEDED, + }, + } + s := setup() + s.FetcherExt.OnListExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("executions NotFound")) + err := getExecutionFunc(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, err, errors.New("executions NotFound")) + s.FetcherExt.AssertCalled(t, "ListExecution", s.Ctx, projectValue, domainValue, execution.DefaultConfig.Filter) +} + +func TestGetExecutionFunc(t *testing.T) { + getExecutionSetup() + executionResponse := &admin.Execution{ + Id: &core.WorkflowExecutionIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: executionNameValue, + }, + Spec: &admin.ExecutionSpec{ + LaunchPlan: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: launchPlanNameValue, + Version: launchPlanVersionValue, + }, + }, + Closure: &admin.ExecutionClosure{ + WorkflowId: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: workflowNameValue, + Version: workflowVersionValue, + }, + Phase: core.WorkflowExecution_SUCCEEDED, + }, + } + args := []string{executionNameValue} + s := setup() + + s.FetcherExt.OnFetchExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(executionResponse, nil) + err := getExecutionFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + 
s.FetcherExt.AssertCalled(t, "FetchExecution", s.Ctx, executionNameValue, projectValue, domainValue) +} + +func TestGetExecutionFuncForDetails(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionSetup() + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + execution.DefaultConfig.Details = true + args := []string{dummyExec} + mockFetcherExt.OnFetchExecutionMatch(ctx, dummyExec, dummyProject, dummyDomain).Return(&admin.Execution{}, nil) + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nil, fmt.Errorf("unable to fetch details")) + err := getExecutionFunc(ctx, args, mockCmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to fetch details"), err) +} + +func TestGetExecutionFuncWithIOData(t *testing.T) { + t.Run("successful inputs outputs", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionSetup() + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + execution.DefaultConfig.NodeID = nodeID + args := []string{dummyExec} + + nodeExec1 := createDummyNodeWithID("n0", false) + taskExec1 := createDummyTaskExecutionForNode("n0", "task21") + taskExec2 := createDummyTaskExecutionForNode("n0", "task22") + + nodeExecutions := []*admin.NodeExecution{nodeExec1} + nodeExecList := &admin.NodeExecutionList{NodeExecutions: nodeExecutions} + + inputs := map[string]*core.Literal{ + "val1": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 110, + }, + }, + }, + }, + }, + }, + } + outputs := map[string]*core.Literal{ + "o2": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 120, + }, + }, + }, + }, + }, + }, + } + dataResp := &admin.NodeExecutionGetDataResponse{ + FullOutputs: &core.LiteralMap{ + 
Literals: inputs, + }, + FullInputs: &core.LiteralMap{ + Literals: outputs, + }, + } + mockFetcherExt.OnFetchExecutionMatch(ctx, dummyExec, dummyProject, dummyDomain).Return(&admin.Execution{}, nil) + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nodeExecList, nil) + mockFetcherExt.OnFetchTaskExecutionsOnNodeMatch(ctx, "n0", dummyExec, dummyProject, dummyDomain).Return(&admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + }, nil) + mockFetcherExt.OnFetchNodeExecutionDataMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(dataResp, nil) + + err := getExecutionFunc(ctx, args, mockCmdCtx) + assert.Nil(t, err) + }) + t.Run("fetch data error from admin", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionSetup() + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + execution.DefaultConfig.NodeID = nodeID + args := []string{dummyExec} + + nodeExec1 := createDummyNodeWithID("n0", false) + taskExec1 := createDummyTaskExecutionForNode("n0", "task21") + taskExec2 := createDummyTaskExecutionForNode("n0", "task22") + + nodeExecutions := []*admin.NodeExecution{nodeExec1} + nodeExecList := &admin.NodeExecutionList{NodeExecutions: nodeExecutions} + mockFetcherExt.OnFetchExecutionMatch(ctx, dummyExec, dummyProject, dummyDomain).Return(&admin.Execution{}, nil) + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nodeExecList, nil) + mockFetcherExt.OnFetchTaskExecutionsOnNodeMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(&admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + }, nil) + mockFetcherExt.OnFetchNodeExecutionDataMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(nil, fmt.Errorf("error in fetching data")) + + err := getExecutionFunc(ctx, args, mockCmdCtx) + assert.NotNil(t, err) + assert.Equal(t, 
fmt.Errorf("error in fetching data"), err) + }) + t.Run("Table test successful cases", func(t *testing.T) { + tests := []struct { + outputFormat string + nodeID string + want error + }{ + {outputFormat: "table", nodeID: "", want: nil}, + {outputFormat: "table", nodeID: "n0", want: nil}, + {outputFormat: "yaml", nodeID: "", want: nil}, + {outputFormat: "yaml", nodeID: "n0", want: nil}, + {outputFormat: "yaml", nodeID: "n1", want: nil}, + } + + args := []string{dummyExec} + for _, tt := range tests { + s := testutils.SetupWithExt() + config.GetConfig().Output = tt.outputFormat + execution.DefaultConfig.NodeID = tt.nodeID + + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + nodeExecToTaskExec := map[string]*admin.TaskExecutionList{} + + nodeExec1 := createDummyNodeWithID("n0", false) + taskExec1 := createDummyTaskExecutionForNode("n0", "task21") + taskExec2 := createDummyTaskExecutionForNode("n0", "task22") + + nodeExecToTaskExec["n0"] = &admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + } + + nodeExecutions := []*admin.NodeExecution{nodeExec1} + nodeExecList := &admin.NodeExecutionList{NodeExecutions: nodeExecutions} + inputs := map[string]*core.Literal{ + "val1": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 100, + }, + }, + }, + }, + }, + }, + } + outputs := map[string]*core.Literal{ + "o2": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 120, + }, + }, + }, + }, + }, + }, + } + dataResp := &admin.NodeExecutionGetDataResponse{ + FullOutputs: &core.LiteralMap{ + Literals: inputs, + }, + FullInputs: &core.LiteralMap{ + Literals: outputs, + }, + } + + mockFetcherExt.OnFetchExecutionMatch(ctx, dummyExec, dummyProject, 
dummyDomain).Return(&admin.Execution{}, nil) + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nodeExecList, nil) + mockFetcherExt.OnFetchTaskExecutionsOnNodeMatch(ctx, "n0", dummyExec, dummyProject, dummyDomain).Return(&admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + }, nil) + mockFetcherExt.OnFetchNodeExecutionDataMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(dataResp, nil) + got := getExecutionFunc(ctx, args, mockCmdCtx) + assert.Equal(t, tt.want, got) + } + }) +} + +func TestGetExecutionFuncWithError(t *testing.T) { + ctx := context.Background() + getExecutionSetup() + _ = &admin.Execution{ + Id: &core.WorkflowExecutionIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: executionNameValue, + }, + Spec: &admin.ExecutionSpec{ + LaunchPlan: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: launchPlanNameValue, + Version: launchPlanVersionValue, + }, + }, + Closure: &admin.ExecutionClosure{ + WorkflowId: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: workflowNameValue, + Version: workflowVersionValue, + }, + Phase: core.WorkflowExecution_SUCCEEDED, + }, + } + + args := []string{executionNameValue} + s := testutils.SetupWithExt() + s.FetcherExt.OnFetchExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("execution NotFound")) + err := getExecutionFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, err, errors.New("execution NotFound")) + s.FetcherExt.AssertCalled(t, "FetchExecution", ctx, "e124", "dummyProject", "dummyDomain") +} diff --git a/flytectl/cmd/get/execution_util.go b/flytectl/cmd/get/execution_util.go new file mode 100644 index 0000000000..f9ad49a2e5 --- /dev/null +++ b/flytectl/cmd/get/execution_util.go @@ -0,0 +1,167 @@ +package get + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + structpb 
"github.com/golang/protobuf/ptypes/struct" + + "gopkg.in/yaml.v3" + + "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" +) + +// ExecutionConfig is duplicated struct from create with the same structure. This is to avoid the circular dependency. Only works with go-yaml. +// TODO : replace this with a cleaner design +type ExecutionConfig struct { + IamRoleARN string `yaml:"iamRoleARN"` + Inputs map[string]yaml.Node `yaml:"inputs"` + Envs map[string]string `yaml:"envs"` + KubeServiceAcct string `yaml:"kubeServiceAcct"` + TargetDomain string `yaml:"targetDomain"` + TargetProject string `yaml:"targetProject"` + Task string `yaml:"task,omitempty"` + Version string `yaml:"version"` + Workflow string `yaml:"workflow,omitempty"` +} + +func WriteExecConfigToFile(executionConfig ExecutionConfig, fileName string) error { + d, err := yaml.Marshal(executionConfig) + if err != nil { + fmt.Printf("error: %v", err) + } + if _, err = os.Stat(fileName); err == nil { + if !cmdUtil.AskForConfirmation(fmt.Sprintf("warning file %v will be overwritten", fileName), os.Stdin) { + return errors.New("backup the file before continuing") + } + } + return ioutil.WriteFile(fileName, d, 0600) +} + +func CreateAndWriteExecConfigForTask(task *admin.Task, fileName string) error { + var err error + executionConfig := ExecutionConfig{Task: task.Id.Name, Version: task.Id.Version} + if executionConfig.Inputs, err = ParamMapForTask(task); err != nil { + return err + } + return WriteExecConfigToFile(executionConfig, fileName) +} + +func CreateAndWriteExecConfigForWorkflow(wlp *admin.LaunchPlan, fileName string) error { + var err error + executionConfig := ExecutionConfig{Workflow: wlp.Id.Name, Version: wlp.Id.Version} + if executionConfig.Inputs, err = ParamMapForWorkflow(wlp); err != nil { + return err + } + return 
WriteExecConfigToFile(executionConfig, fileName) +} + +func TaskInputs(task *admin.Task) map[string]*core.Variable { + taskInputs := map[string]*core.Variable{} + if task == nil || task.Closure == nil { + return taskInputs + } + if task.Closure.CompiledTask == nil { + return taskInputs + } + if task.Closure.CompiledTask.Template == nil { + return taskInputs + } + if task.Closure.CompiledTask.Template.Interface == nil { + return taskInputs + } + if task.Closure.CompiledTask.Template.Interface.Inputs == nil { + return taskInputs + } + return task.Closure.CompiledTask.Template.Interface.Inputs.Variables +} + +func ParamMapForTask(task *admin.Task) (map[string]yaml.Node, error) { + taskInputs := TaskInputs(task) + paramMap := make(map[string]yaml.Node, len(taskInputs)) + for k, v := range taskInputs { + varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.Type) + if err != nil { + fmt.Println("error creating default value for literal type ", v.Type) + return nil, err + } + var nativeLiteral interface{} + if nativeLiteral, err = coreutils.ExtractFromLiteral(varTypeValue); err != nil { + return nil, err + } + + if k == v.Description { + // a: # a isn't very helpful + paramMap[k], err = getCommentedYamlNode(nativeLiteral, "") + } else { + paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.Description) + } + if err != nil { + return nil, err + } + } + return paramMap, nil +} + +func WorkflowParams(lp *admin.LaunchPlan) map[string]*core.Parameter { + workflowParams := map[string]*core.Parameter{} + if lp == nil || lp.Spec == nil { + return workflowParams + } + if lp.Spec.DefaultInputs == nil { + return workflowParams + } + return lp.Spec.DefaultInputs.Parameters +} + +func ParamMapForWorkflow(lp *admin.LaunchPlan) (map[string]yaml.Node, error) { + workflowParams := WorkflowParams(lp) + paramMap := make(map[string]yaml.Node, len(workflowParams)) + for k, v := range workflowParams { + varTypeValue, err := coreutils.MakeDefaultLiteralForType(v.Var.Type) + if err != 
nil { + fmt.Println("error creating default value for literal type ", v.Var.Type) + return nil, err + } + var nativeLiteral interface{} + if nativeLiteral, err = coreutils.ExtractFromLiteral(varTypeValue); err != nil { + return nil, err + } + // Override if there is a default value + if paramsDefault, ok := v.Behavior.(*core.Parameter_Default); ok { + if nativeLiteral, err = coreutils.ExtractFromLiteral(paramsDefault.Default); err != nil { + return nil, err + } + } + if k == v.Var.Description { + // a: # a isn't very helpful + paramMap[k], err = getCommentedYamlNode(nativeLiteral, "") + } else { + paramMap[k], err = getCommentedYamlNode(nativeLiteral, v.Var.Description) + } + + if err != nil { + return nil, err + } + } + return paramMap, nil +} + +func getCommentedYamlNode(input interface{}, comment string) (yaml.Node, error) { + var node yaml.Node + var err error + + if s, ok := input.(*structpb.Struct); ok { + err = node.Encode(s.AsMap()) + } else { + err = node.Encode(input) + } + + node.LineComment = comment + return node, err +} diff --git a/flytectl/cmd/get/execution_util_test.go b/flytectl/cmd/get/execution_util_test.go new file mode 100644 index 0000000000..ff316ca4e0 --- /dev/null +++ b/flytectl/cmd/get/execution_util_test.go @@ -0,0 +1,90 @@ +package get + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/stretchr/testify/assert" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestTaskInputs(t *testing.T) { + taskInputs := map[string]*core.Variable{} + t.Run("nil task", func(t *testing.T) { + retValue := TaskInputs(nil) + assert.Equal(t, taskInputs, retValue) + }) + t.Run("valid inputs", func(t *testing.T) { + task := createTask() + retValue := TaskInputs(task) + assert.Equal(t, task.Closure.CompiledTask.Template.Interface.Inputs.Variables, retValue) + }) + t.Run("closure compiled task nil", func(t *testing.T) { + task := 
createTask() + task.Closure.CompiledTask = nil + retValue := TaskInputs(task) + assert.Equal(t, taskInputs, retValue) + }) + t.Run("closure compiled task template nil", func(t *testing.T) { + task := createTask() + task.Closure.CompiledTask.Template = nil + retValue := TaskInputs(task) + assert.Equal(t, taskInputs, retValue) + }) + t.Run("closure compiled task template interface nil", func(t *testing.T) { + task := createTask() + task.Closure.CompiledTask.Template.Interface = nil + retValue := TaskInputs(task) + assert.Equal(t, taskInputs, retValue) + }) + t.Run("closure compiled task template interface input nil", func(t *testing.T) { + task := createTask() + task.Closure.CompiledTask.Template.Interface.Inputs = nil + retValue := TaskInputs(task) + assert.Equal(t, taskInputs, retValue) + }) +} + +func createTask() *admin.Task { + sortedListLiteralType := core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + } + + variableMap := map[string]*core.Variable{ + "sorted_list1": &sortedListLiteralType, + "sorted_list2": &sortedListLiteralType, + } + + inputs := &core.VariableMap{ + Variables: variableMap, + } + typedInterface := &core.TypedInterface{ + Inputs: inputs, + } + taskTemplate := &core.TaskTemplate{ + Interface: typedInterface, + } + compiledTask := &core.CompiledTask{ + Template: taskTemplate, + } + return &admin.Task{ + Id: &core.Identifier{ + Name: "task1", + Version: "v2", + }, + Closure: &admin.TaskClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledTask: compiledTask, + }, + } +} diff --git a/flytectl/cmd/get/get.go b/flytectl/cmd/get/get.go new file mode 100644 index 0000000000..7c1b5ea164 --- /dev/null +++ b/flytectl/cmd/get/get.go @@ -0,0 +1,74 @@ +package get + +import ( + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + 
"github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/launchplan" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + "github.com/flyteorg/flytectl/cmd/config/subcommand/task" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflow" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using sphinx. +const ( + getCmdShort = `Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects.` + getCmdLong = ` +To fetch a project, use the following command: +:: + + flytectl get project +` +) + +// CreateGetCommand will return get command +func CreateGetCommand() *cobra.Command { + getCmd := &cobra.Command{ + Use: "get", + Short: getCmdShort, + Long: getCmdLong, + } + + getResourcesFuncs := map[string]cmdcore.CommandEntry{ + "project": {CmdFunc: getProjectsFunc, Aliases: []string{"projects"}, ProjectDomainNotRequired: true, + Short: projectShort, + Long: projectLong, PFlagProvider: project.DefaultConfig}, + "task": {CmdFunc: getTaskFunc, Aliases: []string{"tasks"}, Short: taskShort, + Long: taskLong, PFlagProvider: task.DefaultConfig}, + "workflow": {CmdFunc: getWorkflowFunc, Aliases: []string{"workflows"}, Short: workflowShort, + Long: workflowLong, PFlagProvider: workflow.DefaultConfig}, + "launchplan": {CmdFunc: getLaunchPlanFunc, Aliases: []string{"launchplans"}, Short: launchPlanShort, + Long: launchPlanLong, PFlagProvider: launchplan.DefaultConfig}, + "execution": 
{CmdFunc: getExecutionFunc, Aliases: []string{"executions"}, Short: executionShort, + Long: executionLong, PFlagProvider: execution.DefaultConfig}, + "task-resource-attribute": {CmdFunc: getTaskResourceAttributes, Aliases: []string{"task-resource-attributes"}, + Short: taskResourceAttributesShort, + Long: taskResourceAttributesLong, PFlagProvider: taskresourceattribute.DefaultFetchConfig}, + "cluster-resource-attribute": {CmdFunc: getClusterResourceAttributes, Aliases: []string{"cluster-resource-attributes"}, + Short: clusterResourceAttributesShort, + Long: clusterResourceAttributesLong, PFlagProvider: clusterresourceattribute.DefaultFetchConfig}, + "execution-queue-attribute": {CmdFunc: getExecutionQueueAttributes, Aliases: []string{"execution-queue-attributes"}, + Short: executionQueueAttributesShort, + Long: executionQueueAttributesLong, PFlagProvider: executionqueueattribute.DefaultFetchConfig}, + "execution-cluster-label": {CmdFunc: getExecutionClusterLabel, Aliases: []string{"execution-cluster-labels"}, + Short: executionClusterLabelShort, + Long: executionClusterLabelLong, PFlagProvider: executionclusterlabel.DefaultFetchConfig}, + "plugin-override": {CmdFunc: getPluginOverridesFunc, Aliases: []string{"plugin-overrides"}, + Short: pluginOverrideShort, + Long: pluginOverrideLong, PFlagProvider: pluginoverride.DefaultFetchConfig}, + "workflow-execution-config": {CmdFunc: getWorkflowExecutionConfigFunc, Aliases: []string{"workflow-execution-config"}, + Short: workflowExecutionConfigShort, + Long: workflowExecutionConfigLong, PFlagProvider: workflowexecutionconfig.DefaultFetchConfig, ProjectDomainNotRequired: true}, + } + + cmdcore.AddCommands(getCmd, getResourcesFuncs) + + return getCmd +} diff --git a/flytectl/cmd/get/get_test.go b/flytectl/cmd/get/get_test.go new file mode 100644 index 0000000000..76452c0b01 --- /dev/null +++ b/flytectl/cmd/get/get_test.go @@ -0,0 +1,55 @@ +package get + +import ( + "fmt" + "sort" + "testing" + + 
"github.com/stretchr/testify/assert" + + "github.com/flyteorg/flytectl/cmd/testutils" +) + +const projectValue = "dummyProject" +const domainValue = "dummyDomain" +const output = "json" +const executionNameValue = "e124" +const launchPlanNameValue = "lp_name" +const launchPlanVersionValue = "lp_version" +const workflowNameValue = "wf_name" +const workflowVersionValue = "wf_version" +const testDataFolder = "../testdata/" + +var setup = testutils.Setup + +const ( + testDataTempFile = "temp-output-file" + testDataNotExistentTempFile = "non-existent-dir/temp-output-file" +) + +func TestCreateGetCommand(t *testing.T) { + getCommand := CreateGetCommand() + assert.Equal(t, getCommand.Use, "get") + assert.Equal(t, getCommand.Short, "Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects.") + fmt.Println(getCommand.Commands()) + assert.Equal(t, len(getCommand.Commands()), 11) + cmdNouns := getCommand.Commands() + // Sort by Use value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + useArray := []string{"cluster-resource-attribute", "execution", "execution-cluster-label", + "execution-queue-attribute", "launchplan", "plugin-override", "project", "task", "task-resource-attribute", "workflow", "workflow-execution-config"} + aliases := [][]string{{"cluster-resource-attributes"}, {"executions"}, {"execution-cluster-labels"}, + {"execution-queue-attributes"}, {"launchplans"}, {"plugin-overrides"}, {"projects"}, {"tasks"}, {"task-resource-attributes"}, {"workflows"}, {"workflow-execution-config"}} + shortArray := []string{clusterResourceAttributesShort, executionShort, executionClusterLabelShort, executionQueueAttributesShort, launchPlanShort, + pluginOverrideShort, projectShort, taskShort, taskResourceAttributesShort, workflowShort, workflowExecutionConfigShort} + longArray := []string{clusterResourceAttributesLong, executionLong, executionClusterLabelLong, executionQueueAttributesLong, 
launchPlanLong, + pluginOverrideLong, projectLong, taskLong, taskResourceAttributesLong, workflowLong, workflowExecutionConfigLong} + for i := range cmdNouns { + assert.Equal(t, cmdNouns[i].Use, useArray[i]) + assert.Equal(t, cmdNouns[i].Aliases, aliases[i]) + assert.Equal(t, cmdNouns[i].Short, shortArray[i]) + assert.Equal(t, cmdNouns[i].Long, longArray[i]) + } +} diff --git a/flytectl/cmd/get/launch_plan.go b/flytectl/cmd/get/launch_plan.go new file mode 100644 index 0000000000..1e23041679 --- /dev/null +++ b/flytectl/cmd/get/launch_plan.go @@ -0,0 +1,247 @@ +package get + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/launchplan" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/ext" + "github.com/flyteorg/flytectl/pkg/printer" + "github.com/golang/protobuf/proto" +) + +const ( + launchPlanShort = "Gets the launch plan resources." + launchPlanLong = ` +Retrieve all launch plans within the project and domain: +:: + + flytectl get launchplan -p flytesnacks -d development + +.. note:: + + The terms launchplan/launchplans are interchangeable in these commands. 
+ +Retrieve a launch plan by name within the project and domain: + +:: + + flytectl get launchplan -p flytesnacks -d development core.basic.lp.go_greet + + +Retrieve the latest version of the task by name within the project and domain: + +:: + + flytectl get launchplan -p flytesnacks -d development core.basic.lp.go_greet --latest + +Retrieve a particular version of the launch plan by name within the project and domain: + +:: + + flytectl get launchplan -p flytesnacks -d development core.basic.lp.go_greet --version v2 + +Retrieve all launch plans for a given workflow name: + +:: + + flytectl get launchplan -p flytesnacks -d development --workflow core.flyte_basics.lp.go_greet + +Retrieve all the launch plans with filters: +:: + + flytectl get launchplan -p flytesnacks -d development --filter.fieldSelector="name=core.basic.lp.go_greet" + +Retrieve all active launch plans: +:: + + flytectl get launchplan -p flytesnacks -d development -o yaml --filter.fieldSelector "state=1" + +Retrieve all archived launch plans: +:: + + flytectl get launchplan -p flytesnacks -d development -o yaml --filter.fieldSelector "state=0" + +Retrieve launch plans entity search across all versions with filters: +:: + + flytectl get launchplan -p flytesnacks -d development k8s_spark.dataframe_passing.my_smart_schema --filter.fieldSelector="version=v1" + + +Retrieve all the launch plans with limit and sorting: +:: + + flytectl get launchplan -p flytesnacks -d development --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve launch plans present in other pages by specifying the limit and page number: +:: + + flytectl get -p flytesnacks -d development launchplan --filter.limit=10 --filter.page=2 + +Retrieve all launch plans within the project and domain in YAML format: + +:: + + flytectl get launchplan -p flytesnacks -d development -o yaml + +Retrieve all launch plans the within the project and domain in JSON format: + +:: + + flytectl get launchplan -p flytesnacks -d development -o 
json + +Retrieve a launch plan within the project and domain as per a version and generates the execution spec file; the file can be used to launch the execution using the 'create execution' command: + +:: + + flytectl get launchplan -d development -p flytesnacks core.control_flow.merge_sort.merge_sort --execFile execution_spec.yaml + +The generated file would look similar to this: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + numbers: + - 0 + numbers_count: 0 + run_local_at_count: 10 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + version: v3 + workflow: core.control_flow.merge_sort.merge_sort + +Check the :ref:` + "`create execution section`" + ` on how to launch one using the generated file. +Usage +` +) + +// Column structure for get specific launchplan +var launchplanColumns = []printer.Column{ + {Header: "Version", JSONPath: "$.id.version"}, + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Type", JSONPath: "$.closure.compiledTask.template.type"}, + {Header: "State", JSONPath: "$.spec.state"}, + {Header: "Schedule", JSONPath: "$.spec.entityMetadata.schedule"}, + {Header: "Inputs", JSONPath: "$.closure.expectedInputs.parameters." + printer.DefaultFormattedDescriptionsKey + ".var.description"}, + {Header: "Outputs", JSONPath: "$.closure.expectedOutputs.variables." 
+ printer.DefaultFormattedDescriptionsKey + ".description"}, +} + +// Column structure for get all launchplans +var launchplansColumns = []printer.Column{ + {Header: "Version", JSONPath: "$.id.version"}, + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Type", JSONPath: "$.id.resourceType"}, + {Header: "CreatedAt", JSONPath: "$.closure.createdAt"}, +} + +func LaunchplanToProtoMessages(l []*admin.LaunchPlan) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func LaunchplanToTableProtoMessages(l []*admin.LaunchPlan) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + m := proto.Clone(m).(*admin.LaunchPlan) + if m.Closure != nil { + if m.Closure.ExpectedInputs != nil && m.Closure.ExpectedInputs.Parameters != nil { + printer.FormatParameterDescriptions(m.Closure.ExpectedInputs.Parameters) + } + if m.Closure.ExpectedOutputs != nil && m.Closure.ExpectedOutputs.Variables != nil { + printer.FormatVariableDescriptions(m.Closure.ExpectedOutputs.Variables) + } + } + messages = append(messages, m) + } + return messages +} + +func getLaunchPlanFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + launchPlanPrinter := printer.Printer{} + var launchPlans []*admin.LaunchPlan + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) == 1 { + name := args[0] + var err error + if launchPlans, err = FetchLPForName(ctx, cmdCtx.AdminFetcherExt(), name, project, domain); err != nil { + return err + } + logger.Debugf(ctx, "Retrieved %v launch plans", len(launchPlans)) + if config.GetConfig().MustOutputFormat() == printer.OutputFormatTABLE { + err = launchPlanPrinter.Print(config.GetConfig().MustOutputFormat(), launchplanColumns, + LaunchplanToTableProtoMessages(launchPlans)...) 
+		} else {
+			err = launchPlanPrinter.Print(config.GetConfig().MustOutputFormat(), launchplanColumns,
+				LaunchplanToProtoMessages(launchPlans)...)
+		}
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if len(launchplan.DefaultConfig.Workflow) > 0 {
+		if len(launchplan.DefaultConfig.Filter.FieldSelector) > 0 {
+			return fmt.Errorf("fieldSelector cannot be specified with workflow flag")
+		}
+		launchplan.DefaultConfig.Filter.FieldSelector = fmt.Sprintf("workflow.name=%s", launchplan.DefaultConfig.Workflow)
+	}
+
+	launchPlans, err := cmdCtx.AdminFetcherExt().FetchAllVerOfLP(ctx, "", config.GetConfig().Project, config.GetConfig().Domain, launchplan.DefaultConfig.Filter)
+	if err != nil {
+		return err
+	}
+
+	logger.Debugf(ctx, "Retrieved %v launch plans", len(launchPlans))
+	if config.GetConfig().MustOutputFormat() == printer.OutputFormatTABLE {
+		return launchPlanPrinter.Print(config.GetConfig().MustOutputFormat(), launchplansColumns,
+			LaunchplanToTableProtoMessages(launchPlans)...)
+	}
+	return launchPlanPrinter.Print(config.GetConfig().MustOutputFormat(), launchplansColumns,
+		LaunchplanToProtoMessages(launchPlans)...)
+
+}
+
+// FetchLPForName fetches the launch plan given its name.
+func FetchLPForName(ctx context.Context, fetcher ext.AdminFetcherExtInterface, name, project,
+	domain string) ([]*admin.LaunchPlan, error) {
+	var launchPlans []*admin.LaunchPlan
+	var lp *admin.LaunchPlan
+	var err error
+	if launchplan.DefaultConfig.Latest {
+		if lp, err = fetcher.FetchLPLatestVersion(ctx, name, project, domain, launchplan.DefaultConfig.Filter); err != nil {
+			return nil, err
+		}
+		launchPlans = append(launchPlans, lp)
+	} else if launchplan.DefaultConfig.Version != "" {
+		if lp, err = fetcher.FetchLPVersion(ctx, name, launchplan.DefaultConfig.Version, project, domain); err != nil {
+			return nil, err
+		}
+		launchPlans = append(launchPlans, lp)
+	} else {
+		launchPlans, err = fetcher.FetchAllVerOfLP(ctx, name, project, domain, launchplan.DefaultConfig.Filter)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if launchplan.DefaultConfig.ExecFile != "" {
+		// There would be at least one launch plan object when the code reaches here, so a length
+		// check is not required.
+		lp = launchPlans[0]
+		// Generate the execution spec file from the first returned launch plan only.
+ if err = CreateAndWriteExecConfigForWorkflow(lp, launchplan.DefaultConfig.ExecFile); err != nil { + return nil, err + } + } + return launchPlans, nil +} diff --git a/flytectl/cmd/get/launch_plan_test.go b/flytectl/cmd/get/launch_plan_test.go new file mode 100644 index 0000000000..f7f8daf242 --- /dev/null +++ b/flytectl/cmd/get/launch_plan_test.go @@ -0,0 +1,409 @@ +package get + +import ( + "fmt" + "os" + "testing" + + structpb "github.com/golang/protobuf/ptypes/struct" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flytectl/pkg/printer" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/launchplan" + "github.com/flyteorg/flytectl/pkg/ext/mocks" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + resourceListRequest *admin.ResourceListRequest + resourceGetRequest *admin.ResourceListRequest + objectGetRequest *admin.ObjectGetRequest + namedIDRequest *admin.NamedEntityIdentifierListRequest + launchPlanListResponse *admin.LaunchPlanList + filteredLaunchPlanListResponse *admin.LaunchPlanList + argsLp []string + namedIdentifierList *admin.NamedEntityIdentifierList + launchPlan2 *admin.LaunchPlan +) + +func getLaunchPlanSetup() { + // TODO: migrate to new command context from testutils + argsLp = []string{"launchplan1"} + parameterMap := map[string]*core.Parameter{ + "numbers": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "short desc", + }, + }, + "numbers_count": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: 
&core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + Description: "long description will be truncated in table", + }, + }, + "run_local_at_count": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + Description: "run_local_at_count", + }, + Behavior: &core.Parameter_Default{ + Default: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 10, + }, + }, + }, + }, + }, + }, + }, + }, + "generic": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }, + }, + Description: "generic", + }, + Behavior: &core.Parameter_Default{ + Default: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Generic{ + Generic: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "foo": {Kind: &structpb.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + }, + }, + }, + }, + }, + } + launchPlan1 := &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: "launchplan1", + Version: "v1", + }, + Spec: &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Name: "workflow1", + }, + DefaultInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + Closure: &admin.LaunchPlanClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + ExpectedInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + } + launchPlan2 = &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: "launchplan1", + Version: "v2", + }, + Spec: &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Name: "workflow2", + }, + DefaultInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + Closure: &admin.LaunchPlanClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + ExpectedInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + } + + 
launchPlans := []*admin.LaunchPlan{launchPlan2, launchPlan1} + + resourceListRequest = &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + }, + } + + resourceGetRequest = &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: argsLp[0], + }, + } + + launchPlanListResponse = &admin.LaunchPlanList{ + LaunchPlans: launchPlans, + } + + filteredLaunchPlanListResponse = &admin.LaunchPlanList{ + LaunchPlans: []*admin.LaunchPlan{launchPlan2}, + } + + objectGetRequest = &admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Project: projectValue, + Domain: domainValue, + Name: argsLp[0], + Version: "v2", + }, + } + + namedIDRequest = &admin.NamedEntityIdentifierListRequest{ + Project: projectValue, + Domain: domainValue, + } + + var entities []*admin.NamedEntityIdentifier + id1 := &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: "launchplan1", + } + id2 := &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: "launchplan2", + } + entities = append(entities, id1, id2) + namedIdentifierList = &admin.NamedEntityIdentifierList{ + Entities: entities, + } + + launchplan.DefaultConfig.Latest = false + launchplan.DefaultConfig.Version = "" + launchplan.DefaultConfig.ExecFile = "" + launchplan.DefaultConfig.Filter = filters.Filters{} +} + +func TestGetLaunchPlanFuncWithError(t *testing.T) { + t.Run("failure fetch latest", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + launchplan.DefaultConfig.Latest = true + launchplan.DefaultConfig.Filter = filters.Filters{} + mockFetcher.OnFetchLPLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching latest version")) + _, err := FetchLPForName(s.Ctx, mockFetcher, 
"lpName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching version ", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + launchplan.DefaultConfig.Version = "v1" + launchplan.DefaultConfig.Filter = filters.Filters{} + mockFetcher.OnFetchLPVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("error fetching version")) + _, err := FetchLPForName(s.Ctx, mockFetcher, "lpName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching all version ", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + launchplan.DefaultConfig.Filter = filters.Filters{} + launchplan.DefaultConfig.Filter = filters.Filters{} + mockFetcher := new(mocks.AdminFetcherExtInterface) + mockFetcher.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching all version")) + _, err := FetchLPForName(s.Ctx, mockFetcher, "lpName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching ", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(nil, fmt.Errorf("error fetching all version")) + s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(nil, fmt.Errorf("error fetching all version")) + s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(nil, fmt.Errorf("error fetching lanuch plan")) + s.MockAdminClient.OnListLaunchPlanIdsMatch(s.Ctx, namedIDRequest).Return(nil, fmt.Errorf("error listing lanuch plan ids")) + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.NotNil(t, err) + }) + + t.Run("failure fetching list", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + argsLp = []string{} + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", 
"dummyProject", "dummyDomain", filters.Filters{}).Return(nil, fmt.Errorf("error fetching all version")) + s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceListRequest).Return(nil, fmt.Errorf("error fetching all version")) + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.NotNil(t, err) + }) +} + +func TestGetLaunchPlanFunc(t *testing.T) { + s := setup() + getLaunchPlanSetup() + s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchAllVerOfLP", s.Ctx, "launchplan1", "dummyProject", "dummyDomain", launchplan.DefaultConfig.Filter) + s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": 
"1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) +} + +func TestGetLaunchPlanFuncLatest(t *testing.T) { + s := setup() + getLaunchPlanSetup() + launchplan.DefaultConfig.Latest = true + launchplan.DefaultConfig.Filter = filters.Filters{} + s.FetcherExt.OnFetchLPLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan2, nil) + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchLPLatestVersion", s.Ctx, "launchplan1", projectValue, domainValue, launchplan.DefaultConfig.Filter) + s.TearDownAndVerify(t, `{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": 
"generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}}`) +} + +func TestGetLaunchPlanWithVersion(t *testing.T) { + s := testutils.SetupWithExt() + getLaunchPlanSetup() + launchplan.DefaultConfig.Version = "v2" + s.FetcherExt.OnFetchLPVersion(s.Ctx, "launchplan1", "v2", "dummyProject", "dummyDomain").Return(launchPlan2, nil) + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchLPVersion", s.Ctx, "launchplan1", "v2", "dummyProject", "dummyDomain") + s.TearDownAndVerify(t, `{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": 
{"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}}`) +} + +func TestGetLaunchPlans(t *testing.T) { + t.Run("no workflow filter", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) + argsLp = []string{} + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) + s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": 
"INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) + }) + t.Run("workflow filter", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{ + FieldSelector: "workflow.name=workflow2", + }).Return(launchPlanListResponse.LaunchPlans, nil) + argsLp = []string{} + launchplan.DefaultConfig.Workflow = "workflow2" + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) 
+ s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": 
"generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) + }) + t.Run("workflow filter error", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + argsLp = []string{} + launchplan.DefaultConfig.Workflow = "workflow2" + launchplan.DefaultConfig.Filter.FieldSelector = "workflow.name" + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("fieldSelector cannot be specified with workflow flag"), err) + }) +} + +func TestGetLaunchPlansWithExecFile(t *testing.T) { + s := testutils.SetupWithExt() + getLaunchPlanSetup() + s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceListRequest).Return(launchPlanListResponse, nil) + s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) + s.MockAdminClient.OnListLaunchPlanIdsMatch(s.Ctx, namedIDRequest).Return(namedIdentifierList, nil) + s.FetcherExt.OnFetchLPVersion(s.Ctx, "launchplan1", "v2", "dummyProject", "dummyDomain").Return(launchPlan2, nil) + launchplan.DefaultConfig.Version = "v2" + launchplan.DefaultConfig.ExecFile = testDataFolder + "exec_file" + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) + + data, err := os.ReadFile(launchplan.DefaultConfig.ExecFile) + assert.Nil(t, err) + assert.Equal(t, `iamRoleARN: "" +inputs: + generic: + foo: foo + numbers: + - 0 + numbers_count: 0 # long description will be truncated in table + run_local_at_count: 10 # short desc +envs: {} +kubeServiceAcct: "" +targetDomain: "" +targetProject: "" +version: v2 +workflow: launchplan1 +`, string(data)) + 
os.Remove(launchplan.DefaultConfig.ExecFile) + + s.FetcherExt.AssertCalled(t, "FetchLPVersion", s.Ctx, "launchplan1", "v2", "dummyProject", "dummyDomain") + s.TearDownAndVerify(t, `{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}}`) +} + +func TestGetLaunchPlanTableFunc(t *testing.T) { + s := testutils.SetupWithExt() + getLaunchPlanSetup() + s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(launchPlanListResponse, nil) + s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) + s.MockAdminClient.OnListLaunchPlanIdsMatch(s.Ctx, namedIDRequest).Return(namedIdentifierList, nil) + s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) + config.GetConfig().Output = 
printer.OutputFormatTABLE.String() + err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchAllVerOfLP", s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}) + s.TearDownAndVerify(t, ` +--------- ------------- ------ ------- ---------- --------------------------- --------- +| VERSION | NAME | TYPE | STATE | SCHEDULE | INPUTS | OUTPUTS | +--------- ------------- ------ ------- ---------- --------------------------- --------- +| v2 | launchplan1 | | | | generic | | +| | | | | | numbers: short desc | | +| | | | | | numbers_count: long de... | | +| | | | | | run_local_at_count | | +--------- ------------- ------ ------- ---------- --------------------------- --------- +| v1 | launchplan1 | | | | generic | | +| | | | | | numbers: short desc | | +| | | | | | numbers_count: long de... | | +| | | | | | run_local_at_count | | +--------- ------------- ------ ------- ---------- --------------------------- --------- +2 rows`) +} diff --git a/flytectl/cmd/get/matchable_attribute_util.go b/flytectl/cmd/get/matchable_attribute_util.go new file mode 100644 index 0000000000..df4468c639 --- /dev/null +++ b/flytectl/cmd/get/matchable_attribute_util.go @@ -0,0 +1,41 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/pkg/ext" +) + +func FetchAndUnDecorateMatchableAttr(ctx context.Context, project, domain, workflowName string, + fetcher ext.AdminFetcherExtInterface, unDecorator sconfig.MatchableAttributeUnDecorator, rsType admin.MatchableResource) error { + if len(workflowName) > 0 { + // Fetch the workflow attribute from the admin + workflowAttr, err := fetcher.FetchWorkflowAttributes(ctx, + project, domain, workflowName, rsType) + if err != nil { + return err + } + // Update the shadow config with the fetched taskResourceAttribute which can then be written to a file 
which can then be called for an update. + unDecorator.UnDecorate(workflowAttr.GetAttributes().GetMatchingAttributes()) + } else { + if len(domain) == 0 { + projectAttr, err := fetcher.FetchProjectAttributes(ctx, project, rsType) + if err != nil { + return err + } + // Update the shadow config with the fetched taskResourceAttribute which can then be written to a file which can then be called for an update. + unDecorator.UnDecorate(projectAttr.GetAttributes().GetMatchingAttributes()) + } else { + // Fetch the project domain attribute from the admin + projectDomainAttr, err := fetcher.FetchProjectDomainAttributes(ctx, project, domain, rsType) + if err != nil { + return err + } + // Update the shadow config with the fetched taskResourceAttribute which can then be written to a file which can then be called for an update. + unDecorator.UnDecorate(projectDomainAttr.GetAttributes().GetMatchingAttributes()) + } + } + return nil +} diff --git a/flytectl/cmd/get/matchable_cluster_resource_attribute.go b/flytectl/cmd/get/matchable_cluster_resource_attribute.go new file mode 100644 index 0000000000..a9fd0b4082 --- /dev/null +++ b/flytectl/cmd/get/matchable_cluster_resource_attribute.go @@ -0,0 +1,88 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + clusterResourceAttributesShort = "Gets matchable resources of cluster resource attributes." + clusterResourceAttributesLong = ` +Retrieve cluster resource attributes for the given project and domain. +For project flytesnacks and development domain: +:: + + flytectl get cluster-resource-attribute -p flytesnacks -d development + +Example: output from the command: + +.. 
code-block:: json + + {"project":"flytesnacks","domain":"development","attributes":{"buzz":"lightyear","foo":"bar"}} + +Retrieve cluster resource attributes for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get cluster-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","attributes":{"buzz":"lightyear","foo":"bar"}} + +Write the cluster resource attributes to a file. If there are no cluster resource attributes, the command throws an error. +The config file is written to cra.yaml file. +Example: content of cra.yaml: + +:: + + flytectl get cluster-resource-attribute --attrFile cra.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + attributes: + foo: "bar" + buzz: "lightyear" + +Usage +` +) + +func getClusterResourceAttributes(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var project string + var domain string + var workflowName string + + // Get the project domain workflow name parameters from the command line. Project and domain are mandatory for this command + project = config.GetConfig().Project + domain = config.GetConfig().Domain + if len(args) == 1 { + workflowName = args[0] + } + // Construct a shadow config for ClusterResourceAttribute. The shadow config is not using ProjectDomainAttribute/WorkflowAttribute directly in order to simplify the inputs. 
+ clusterResourceAttrFileConfig := clusterresourceattribute.AttrFileConfig{Project: project, Domain: domain, Workflow: workflowName} + // Get the attribute file name from the command line config + fileName := clusterresourceattribute.DefaultFetchConfig.AttrFile + + // Updates the taskResourceAttrFileConfig with the fetched matchable attribute + if err := FetchAndUnDecorateMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminFetcherExt(), + &clusterResourceAttrFileConfig, admin.MatchableResource_CLUSTER_RESOURCE); err != nil { + return err + } + + // Write the config to the file which can be used for update + if err := sconfig.DumpTaskResourceAttr(clusterResourceAttrFileConfig, fileName); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go new file mode 100644 index 0000000000..6e601f3c1c --- /dev/null +++ b/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go @@ -0,0 +1,130 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func getClusterResourceAttributeSetup() { + clusterresourceattribute.DefaultFetchConfig = &clusterresourceattribute.AttrFetchConfig{} + // Clean up the temp directory. 
+ _ = os.Remove(testDataTempFile) +} + +func TestGetClusterResourceAttributes(t *testing.T) { + clusterResourceAttr := &admin.ClusterResourceAttributes{ + Attributes: map[string]string{"foo": "bar"}, + } + projectDomainResp := &admin.ProjectDomainAttributesGetResponse{ + Attributes: &admin.ProjectDomainAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: clusterResourceAttr, + }, + }, + }, + } + workflowResp := &admin.WorkflowAttributesGetResponse{ + Attributes: &admin.WorkflowAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Workflow: "workflow", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: clusterResourceAttr, + }, + }, + }, + } + t.Run("successful get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getClusterResourceAttributeSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","attributes":{"foo":"bar"}}`) + }) + t.Run("successful get project domain attribute and write to file", func(t *testing.T) { + s := testutils.SetupWithExt() + getClusterResourceAttributeSetup() + clusterresourceattribute.DefaultFetchConfig.AttrFile = testDataTempFile + // No args implying project domain attribute deletion + 
s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) + s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) + }) + t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { + s := testutils.SetupWithExt() + getClusterResourceAttributeSetup() + clusterresourceattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("error dumping in file due to open non-existent-dir/temp-output-file: no such file or directory"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) + s.TearDownAndVerify(t, ``) + }) + t.Run("failed get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getClusterResourceAttributeSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getClusterResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, 
admin.MatchableResource_CLUSTER_RESOURCE) + s.TearDownAndVerify(t, ``) + }) + t.Run("successful get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getClusterResourceAttributeSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(workflowResp, nil) + err := getClusterResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_CLUSTER_RESOURCE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","attributes":{"foo":"bar"}}`) + }) + t.Run("failed get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getClusterResourceAttributeSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getClusterResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_CLUSTER_RESOURCE) + s.TearDownAndVerify(t, ``) + }) +} diff --git a/flytectl/cmd/get/matchable_execution_cluster_label.go b/flytectl/cmd/get/matchable_execution_cluster_label.go new file mode 100644 index 0000000000..e47e17ff5f --- /dev/null +++ b/flytectl/cmd/get/matchable_execution_cluster_label.go @@ -0,0 +1,87 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + 
"github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + executionClusterLabelShort = "Gets matchable resources of execution cluster label." + executionClusterLabelLong = ` +Retrieve the execution cluster label for a given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl get execution-cluster-label -p flytesnacks -d development + +The output would look like: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","value":"foo"} + +Retrieve the execution cluster label for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get execution-cluster-label -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","value":"foo"} + +Write the execution cluster label to a file. If there is no execution cluster label, the command throws an error. +The config file is written to ecl.yaml file. +Example: content of ecl.yaml: + +:: + + flytectl get execution-cluster-label --attrFile ecl.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + value: foo + +Usage +` +) + +func getExecutionClusterLabel(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var project string + var domain string + var workflowName string + + // Get the project domain workflow name parameters from the command line. Project and domain are mandatory for this command + project = config.GetConfig().Project + domain = config.GetConfig().Domain + if len(args) == 1 { + workflowName = args[0] + } + // Construct a shadow config for ExecutionClusterLabel. 
The shadow config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. + executionClusterLabelFileConfig := executionclusterlabel.FileConfig{Project: project, Domain: domain, Workflow: workflowName} + // Get the attribute file name from the command line config + fileName := executionclusterlabel.DefaultFetchConfig.AttrFile + + // Updates the taskResourceAttrFileConfig with the fetched matchable attribute + if err := FetchAndUnDecorateMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminFetcherExt(), + &executionClusterLabelFileConfig, admin.MatchableResource_EXECUTION_CLUSTER_LABEL); err != nil { + return err + } + + // Write the config to the file which can be used for update + if err := sconfig.DumpTaskResourceAttr(executionClusterLabelFileConfig, fileName); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/get/matchable_execution_cluster_label_test.go b/flytectl/cmd/get/matchable_execution_cluster_label_test.go new file mode 100644 index 0000000000..e418a5fc27 --- /dev/null +++ b/flytectl/cmd/get/matchable_execution_cluster_label_test.go @@ -0,0 +1,130 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func getExecutionClusterLabelSetup() { + executionclusterlabel.DefaultFetchConfig = &executionclusterlabel.AttrFetchConfig{} + // Clean up the temp directory. 
+ _ = os.Remove(testDataTempFile) +} + +func TestGetExecutionClusterLabel(t *testing.T) { + executionClusterLabel := &admin.ExecutionClusterLabel{ + Value: "foo", + } + projectDomainResp := &admin.ProjectDomainAttributesGetResponse{ + Attributes: &admin.ProjectDomainAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: executionClusterLabel, + }, + }, + }, + } + workflowResp := &admin.WorkflowAttributesGetResponse{ + Attributes: &admin.WorkflowAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Workflow: "workflow", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: executionClusterLabel, + }, + }, + }, + } + t.Run("successful get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionClusterLabelSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","value":"foo"}`) + }) + t.Run("successful get project domain attribute and write to file", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionClusterLabelSetup() + executionclusterlabel.DefaultFetchConfig.AttrFile = testDataTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + 
mock.Anything).Return(projectDomainResp, nil) + err := getExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) + }) + t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionClusterLabelSetup() + executionclusterlabel.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("error dumping in file due to open non-existent-dir/temp-output-file: no such file or directory"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + s.TearDownAndVerify(t, ``) + }) + t.Run("failed to get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionClusterLabelSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getExecutionClusterLabel(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + s.TearDownAndVerify(t, ``) + }) + t.Run("successful get 
workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionClusterLabelSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(workflowResp, nil) + err := getExecutionClusterLabel(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","value":"foo"}`) + }) + t.Run("failed to get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionClusterLabelSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getExecutionClusterLabel(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_EXECUTION_CLUSTER_LABEL) + s.TearDownAndVerify(t, ``) + }) +} diff --git a/flytectl/cmd/get/matchable_execution_queue_attribute.go b/flytectl/cmd/get/matchable_execution_queue_attribute.go new file mode 100644 index 0000000000..f5ac300a8a --- /dev/null +++ b/flytectl/cmd/get/matchable_execution_queue_attribute.go @@ -0,0 +1,90 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + 
executionQueueAttributesShort = "Gets matchable resources of execution queue attributes." + executionQueueAttributesLong = ` +Retrieve the execution queue attribute for the given project and domain. +For project flytesnacks and development domain: +:: + + flytectl get execution-queue-attribute -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","tags":["foo", "bar"]} + +Retrieve the execution queue attribute for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get execution-queue-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","tags":["foo", "bar"]} + +Write the execution queue attribute to a file. If there are no execution queue attributes, the command throws an error. +The config file is written to era.yaml file. +Example: content of era.yaml: + +:: + + flytectl get execution-queue-attribute --attrFile era.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + tags: + - foo + - bar + - buzz + - lightyear + +Usage +` +) + +func getExecutionQueueAttributes(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var project string + var domain string + var workflowName string + + // Get the project domain workflow name parameters from the command line. Project and domain are mandatory for this command + project = config.GetConfig().Project + domain = config.GetConfig().Domain + if len(args) == 1 { + workflowName = args[0] + } + // Construct a shadow config for ExecutionQueueAttribute. The shadow config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. 
+ executionQueueAttrFileConfig := executionqueueattribute.AttrFileConfig{Project: project, Domain: domain, Workflow: workflowName} + // Get the attribute file name from the command line config + fileName := executionqueueattribute.DefaultFetchConfig.AttrFile + + // Updates the taskResourceAttrFileConfig with the fetched matchable attribute + if err := FetchAndUnDecorateMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminFetcherExt(), + &executionQueueAttrFileConfig, admin.MatchableResource_EXECUTION_QUEUE); err != nil { + return err + } + + // Write the config to the file which can be used for update + if err := sconfig.DumpTaskResourceAttr(executionQueueAttrFileConfig, fileName); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/get/matchable_execution_queue_attribute_test.go b/flytectl/cmd/get/matchable_execution_queue_attribute_test.go new file mode 100644 index 0000000000..a47b9c78ae --- /dev/null +++ b/flytectl/cmd/get/matchable_execution_queue_attribute_test.go @@ -0,0 +1,130 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func getExecutionQueueAttributeSetup() { + executionqueueattribute.DefaultFetchConfig = &executionqueueattribute.AttrFetchConfig{} + // Clean up the temp directory. 
+ _ = os.Remove(testDataTempFile) +} + +func TestGetExecutionQueueAttributes(t *testing.T) { + executionQueueAttr := &admin.ExecutionQueueAttributes{ + Tags: []string{"foo", "bar"}, + } + projectDomainResp := &admin.ProjectDomainAttributesGetResponse{ + Attributes: &admin.ProjectDomainAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionQueueAttributes{ + ExecutionQueueAttributes: executionQueueAttr, + }, + }, + }, + } + workflowResp := &admin.WorkflowAttributesGetResponse{ + Attributes: &admin.WorkflowAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Workflow: "workflow", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionQueueAttributes{ + ExecutionQueueAttributes: executionQueueAttr, + }, + }, + }, + } + t.Run("successful get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionQueueAttributeSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","tags":["foo","bar"]}`) + }) + t.Run("successful get project domain attribute and write to file", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionQueueAttributeSetup() + executionqueueattribute.DefaultFetchConfig.AttrFile = testDataTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, 
mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) + s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) + }) + t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionQueueAttributeSetup() + executionqueueattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("error dumping in file due to open non-existent-dir/temp-output-file: no such file or directory"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) + s.TearDownAndVerify(t, ``) + }) + t.Run("failed get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionQueueAttributeSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getExecutionQueueAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) + s.TearDownAndVerify(t, ``) + }) + t.Run("successful 
get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionQueueAttributeSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(workflowResp, nil) + err := getExecutionQueueAttributes(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_EXECUTION_QUEUE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","tags":["foo","bar"]}`) + }) + t.Run("failed get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getExecutionQueueAttributeSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getExecutionQueueAttributes(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_EXECUTION_QUEUE) + s.TearDownAndVerify(t, ``) + }) +} diff --git a/flytectl/cmd/get/matchable_plugin_override.go b/flytectl/cmd/get/matchable_plugin_override.go new file mode 100644 index 0000000000..be6a1004fe --- /dev/null +++ b/flytectl/cmd/get/matchable_plugin_override.go @@ -0,0 +1,109 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + pluginOverrideShort = 
"Gets matchable resources of plugin override." + pluginOverrideLong = ` +Retrieve the plugin override for the given project and domain. +For project flytesnacks and development domain: + +:: + + flytectl get plugin-override -p flytesnacks -d development + +Example: output from the command + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "overrides": [{ + "task_type": "python_task", + "plugin_id": ["pluginoverride1", "pluginoverride2"], + "missing_plugin_behavior": 0 + }] + } + +Retrieve the plugin override for the given project, domain, and workflow. +For project flytesnacks, development domain and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get plugin-override -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "workflow": "core.control_flow.merge_sort.merge_sort" + "overrides": [{ + "task_type": "python_task", + "plugin_id": ["pluginoverride1", "pluginoverride2"], + "missing_plugin_behavior": 0 + }] + } + +Write plugin overrides to a file. If there are no plugin overrides, the command throws an error. +The config file is written to po.yaml file. +Example: content of po.yaml: + +:: + + flytectl get plugin-override --attrFile po.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 
0 : FAIL , 1: DEFAULT + +Usage +` +) + +func getPluginOverridesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var project string + var domain string + var workflowName string + + // Get the project domain workflow name parameters from the command line. Project and domain are mandatory for this command + project = config.GetConfig().Project + domain = config.GetConfig().Domain + if len(args) == 1 { + workflowName = args[0] + } + // Construct a shadow config for PluginOverrides. The shadow config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. + pluginOverrideFileConfig := pluginoverride.FileConfig{Project: project, Domain: domain, Workflow: workflowName} + // Get the plugin overrides from the command line config + fileName := pluginoverride.DefaultFetchConfig.AttrFile + + // Updates the pluginOverrideFileConfig with the fetched matchable attribute + if err := FetchAndUnDecorateMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminFetcherExt(), + &pluginOverrideFileConfig, admin.MatchableResource_PLUGIN_OVERRIDE); err != nil { + return err + } + + // Write the config to the file which can be used for update + if err := sconfig.DumpTaskResourceAttr(pluginOverrideFileConfig, fileName); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/get/matchable_plugin_override_test.go b/flytectl/cmd/get/matchable_plugin_override_test.go new file mode 100644 index 0000000000..a4f6c1256f --- /dev/null +++ b/flytectl/cmd/get/matchable_plugin_override_test.go @@ -0,0 +1,140 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func 
getPluginOverrideSetup() { + pluginoverride.DefaultFetchConfig = &pluginoverride.AttrFetchConfig{} + // Clean up the temp directory. + _ = os.Remove(testDataTempFile) +} + +func TestGetPluginOverride(t *testing.T) { + pluginOverride1 := &admin.PluginOverride{ + TaskType: "python_task", + PluginId: []string{"plugin-override1", "plugin-override2"}, + MissingPluginBehavior: admin.PluginOverride_FAIL, + } + pluginOverride2 := &admin.PluginOverride{ + TaskType: "java_task", + PluginId: []string{"plugin-override3", "plugin-override3"}, + MissingPluginBehavior: admin.PluginOverride_USE_DEFAULT, + } + pluginOverrides := []*admin.PluginOverride{pluginOverride1, pluginOverride2} + projectDomainResp := &admin.ProjectDomainAttributesGetResponse{ + Attributes: &admin.ProjectDomainAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: &admin.PluginOverrides{ + Overrides: pluginOverrides, + }, + }, + }, + }, + } + workflowResp := &admin.WorkflowAttributesGetResponse{ + Attributes: &admin.WorkflowAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Workflow: "workflow", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: &admin.PluginOverrides{ + Overrides: pluginOverrides, + }, + }, + }, + }, + } + t.Run("successful get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getPluginOverrideSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getPluginOverridesFunc(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, 
config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","overrides":[{"task_type":"python_task","plugin_id":["plugin-override1","plugin-override2"]},{"task_type":"java_task","plugin_id":["plugin-override3","plugin-override3"],"missing_plugin_behavior":1}]}`) + }) + t.Run("successful get project domain attribute and write to file", func(t *testing.T) { + s := testutils.SetupWithExt() + getPluginOverrideSetup() + pluginoverride.DefaultFetchConfig.AttrFile = testDataTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getPluginOverridesFunc(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) + s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) + }) + t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { + s := testutils.SetupWithExt() + getPluginOverrideSetup() + pluginoverride.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getPluginOverridesFunc(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("error dumping in file due to open non-existent-dir/temp-output-file: no such file or directory"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) + s.TearDownAndVerify(t, ``) + }) + t.Run("failed get project domain attribute", func(t 
*testing.T) { + s := testutils.SetupWithExt() + getPluginOverrideSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getPluginOverridesFunc(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) + s.TearDownAndVerify(t, ``) + }) + t.Run("successful get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getPluginOverrideSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(workflowResp, nil) + err := getPluginOverridesFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", admin.MatchableResource_PLUGIN_OVERRIDE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","overrides":[{"task_type":"python_task","plugin_id":["plugin-override1","plugin-override2"]},{"task_type":"java_task","plugin_id":["plugin-override3","plugin-override3"],"missing_plugin_behavior":1}]}`) + }) + t.Run("failed get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getPluginOverrideSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getPluginOverridesFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + 
s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", admin.MatchableResource_PLUGIN_OVERRIDE) + s.TearDownAndVerify(t, ``) + }) +} diff --git a/flytectl/cmd/get/matchable_task_resource_attribute.go b/flytectl/cmd/get/matchable_task_resource_attribute.go new file mode 100644 index 0000000000..c27a0b663d --- /dev/null +++ b/flytectl/cmd/get/matchable_task_resource_attribute.go @@ -0,0 +1,92 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + taskResourceAttributesShort = "Gets matchable resources of task attributes." + taskResourceAttributesLong = ` +Retrieve task resource attributes for the given project and domain. +For project flytesnacks and development domain: +:: + + flytectl get task-resource-attribute -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"450Mi"}} + +Retrieve task resource attributes for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get task-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"450Mi"}} + + +Write the task resource attributes to a file. 
If there are no task resource attributes, a file would be populated with the basic data. +The config file is written to tra.yaml file. +Example: content of tra.yaml: + +:: + + flytectl get -p flytesnacks -d development task-resource-attribute --attrFile tra.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +Usage +` +) + +func getTaskResourceAttributes(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var project string + var domain string + var workflowName string + + // Get the project domain workflow name parameters from the command line. Project and domain are mandatory for this command + project = config.GetConfig().Project + domain = config.GetConfig().Domain + if len(args) == 1 { + workflowName = args[0] + } + // Construct a shadow config for TaskResourceAttribute. The shadow config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. 
+ taskResourceAttrFileConfig := taskresourceattribute.TaskResourceAttrFileConfig{Project: project, Domain: domain, Workflow: workflowName} + // Get the attribute file name from the command line config + fileName := taskresourceattribute.DefaultFetchConfig.AttrFile + + // Updates the taskResourceAttrFileConfig with the fetched matchable attribute + if err := FetchAndUnDecorateMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminFetcherExt(), + &taskResourceAttrFileConfig, admin.MatchableResource_TASK_RESOURCE); err != nil { + return err + } + + // Write the config to the file which can be used for update + if err := sconfig.DumpTaskResourceAttr(taskResourceAttrFileConfig, fileName); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/get/matchable_task_resource_attribute_test.go b/flytectl/cmd/get/matchable_task_resource_attribute_test.go new file mode 100644 index 0000000000..c21ddcd9df --- /dev/null +++ b/flytectl/cmd/get/matchable_task_resource_attribute_test.go @@ -0,0 +1,137 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func getTaskResourceAttributeSetup() { + taskresourceattribute.DefaultFetchConfig = &taskresourceattribute.AttrFetchConfig{} + // Clean up the temp directory. 
+ _ = os.Remove(testDataTempFile) +} + +func TestGetTaskResourceAttributes(t *testing.T) { + taskResourceAttr := &admin.TaskResourceAttributes{ + Defaults: &admin.TaskResourceSpec{ + Cpu: "1", + Memory: "150Mi", + }, + Limits: &admin.TaskResourceSpec{ + Cpu: "2", + Memory: "350Mi", + }, + } + projectDomainResp := &admin.ProjectDomainAttributesGetResponse{ + Attributes: &admin.ProjectDomainAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: taskResourceAttr, + }, + }, + }, + } + workflowResp := &admin.WorkflowAttributesGetResponse{ + Attributes: &admin.WorkflowAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Workflow: "workflow", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: taskResourceAttr, + }, + }, + }, + } + t.Run("successful get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getTaskResourceAttributeSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"350Mi"}}`) + }) + t.Run("successful get project domain attribute and write to file", func(t *testing.T) { + s := testutils.SetupWithExt() + getTaskResourceAttributeSetup() + taskresourceattribute.DefaultFetchConfig.AttrFile = testDataTempFile + 
// No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) + s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) + }) + t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { + s := testutils.SetupWithExt() + getTaskResourceAttributeSetup() + taskresourceattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("error dumping in file due to open non-existent-dir/temp-output-file: no such file or directory"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) + s.TearDownAndVerify(t, ``) + }) + t.Run("failed get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getTaskResourceAttributeSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getTaskResourceAttributes(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, 
config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) + s.TearDownAndVerify(t, ``) + }) + t.Run("successful get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getTaskResourceAttributeSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(workflowResp, nil) + err := getTaskResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_TASK_RESOURCE) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"350Mi"}}`) + }) + t.Run("failed get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getTaskResourceAttributeSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getTaskResourceAttributes(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_TASK_RESOURCE) + s.TearDownAndVerify(t, ``) + }) +} diff --git a/flytectl/cmd/get/matchable_workflow_execution_config.go b/flytectl/cmd/get/matchable_workflow_execution_config.go new file mode 100644 index 0000000000..ba414f7833 --- /dev/null +++ b/flytectl/cmd/get/matchable_workflow_execution_config.go @@ -0,0 +1,184 @@ +package get + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + 
"github.com/flyteorg/flytectl/cmd/config" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + workflowExecutionConfigShort = "Gets matchable resources of workflow execution config." + workflowExecutionConfigLong = ` +Retrieve workflow execution config for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain: + +:: + + flytectl get workflow-execution-config -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "max_parallelism": 5 + } + +Retrieve workflow execution config for the project, domain, and workflow. +For project flytesnacks, development domain and workflow 'core.control_flow.merge_sort.merge_sort': + +:: + + flytectl get workflow-execution-config -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "workflow": "core.control_flow.merge_sort.merge_sort" + "max_parallelism": 5 + } + +Write the workflow execution config to a file. If there are no workflow execution config, the command throws an error. +The config file is written to wec.yaml file. +Example: content of wec.yaml: + +:: + + flytectl get workflow-execution-config -p flytesnacks -d development --attrFile wec.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + max_parallelism: 5 + +Generate a sample workflow execution config file to be used for creating a new workflow execution config at project domain + +:: + flytectl get workflow-execution-config -p flytesnacks -d development --attrFile wec.yaml --gen + + +.. 
code-block:: yaml + + annotations: + values: + cliAnnotationKey: cliAnnotationValue + domain: development + labels: + values: + cliLabelKey: cliLabelValue + max_parallelism: 10 + project: flytesnacks + raw_output_data_config: + output_location_prefix: cliOutputLocationPrefix + security_context: + run_as: + k8s_service_account: default + + + +Generate a sample workflow execution config file to be used for creating a new workflow execution config at project domain workflow level + +:: + flytectl get workflow-execution-config -p flytesnacks -d development --attrFile wec.yaml flytectl get workflow-execution-config --gen + + +.. code-block:: yaml + + annotations: + values: + cliAnnotationKey: cliAnnotationValue + domain: development + labels: + values: + cliLabelKey: cliLabelValue + max_parallelism: 10 + project: flytesnacks + workflow: k8s_spark.dataframe_passing.my_smart_structured_dataset + raw_output_data_config: + output_location_prefix: cliOutputLocationPrefix + security_context: + run_as: + k8s_service_account: default + + +Usage +` +) + +func getWorkflowExecutionConfigFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var project string + var domain string + var workflowName string + + // Get the project domain workflow name parameters from the command line. Project and domain are mandatory for this command + project = config.GetConfig().Project + domain = config.GetConfig().Domain + if len(args) == 1 { + workflowName = args[0] + } + // Construct a shadow config for WorkflowExecutionConfig. The shadow config is not using ProjectDomainAttribute/Workflowattribute directly inorder to simplify the inputs. 
+ workflowExecutionConfigFileConfig := workflowexecutionconfig.FileConfig{Project: project, Domain: domain, Workflow: workflowName} + // Get the workflow execution config from the command line config + fileName := workflowexecutionconfig.DefaultFetchConfig.AttrFile + + // Updates the workflowExecutionConfigFileConfig with the fetched matchable attribute + if err := FetchAndUnDecorateMatchableAttr(ctx, project, domain, workflowName, cmdCtx.AdminFetcherExt(), + &workflowExecutionConfigFileConfig, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG); err != nil { + if grpcError := status.Code(err); grpcError == codes.NotFound && workflowexecutionconfig.DefaultFetchConfig.Gen { + fmt.Println("Generating a sample workflow execution config file") + workflowExecutionConfigFileConfig = getSampleWorkflowExecutionFileConfig(project, domain, workflowName) + } else { + return err + } + } + + // Write the config to the file which can be used for update + if err := sconfig.DumpTaskResourceAttr(workflowExecutionConfigFileConfig, fileName); err != nil { + return err + } + return nil +} + +func getSampleWorkflowExecutionFileConfig(project, domain, workflow string) workflowexecutionconfig.FileConfig { + return workflowexecutionconfig.FileConfig{ + Project: project, + Domain: domain, + Workflow: workflow, + WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ + MaxParallelism: 10, + SecurityContext: &core.SecurityContext{ + RunAs: &core.Identity{ + K8SServiceAccount: "default", + IamRole: "", + }, + }, + Labels: &admin.Labels{ + Values: map[string]string{"cliLabelKey": "cliLabelValue"}, + }, + Annotations: &admin.Annotations{ + Values: map[string]string{"cliAnnotationKey": "cliAnnotationValue"}, + }, + RawOutputDataConfig: &admin.RawOutputDataConfig{ + OutputLocationPrefix: "cliOutputLocationPrefix", + }, + }, + } +} diff --git a/flytectl/cmd/get/matchable_workflow_execution_config_test.go b/flytectl/cmd/get/matchable_workflow_execution_config_test.go new file mode 100644 index 
0000000000..0f9658c7f4 --- /dev/null +++ b/flytectl/cmd/get/matchable_workflow_execution_config_test.go @@ -0,0 +1,130 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func getWorkflowExecutionConfigSetup() { + workflowexecutionconfig.DefaultFetchConfig = &workflowexecutionconfig.AttrFetchConfig{} + // Clean up the temp directory. + _ = os.Remove(testDataTempFile) +} + +func TestGetWorkflowExecutionConfig(t *testing.T) { + executionClusterLabel := &admin.WorkflowExecutionConfig{ + MaxParallelism: 5, + } + projectDomainResp := &admin.ProjectDomainAttributesGetResponse{ + Attributes: &admin.ProjectDomainAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: executionClusterLabel, + }, + }, + }, + } + workflowResp := &admin.WorkflowAttributesGetResponse{ + Attributes: &admin.WorkflowAttributes{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Workflow: "workflow", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: executionClusterLabel, + }, + }, + }, + } + t.Run("successful get project domain attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowExecutionConfigSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getWorkflowExecutionConfigFunc(s.Ctx, []string{}, s.CmdCtx) + 
assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","max_parallelism":5}`) + }) + t.Run("successful get project domain attribute and write to file", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowExecutionConfigSetup() + workflowexecutionconfig.DefaultFetchConfig.AttrFile = testDataTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getWorkflowExecutionConfigFunc(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) + }) + t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowExecutionConfigSetup() + workflowexecutionconfig.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(projectDomainResp, nil) + err := getWorkflowExecutionConfigFunc(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("error dumping in file due to open non-existent-dir/temp-output-file: no such file or directory"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + s.TearDownAndVerify(t, ``) + }) + t.Run("failed get project domain 
attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowExecutionConfigSetup() + // No args implying project domain attribute deletion + s.FetcherExt.OnFetchProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getWorkflowExecutionConfigFunc(s.Ctx, []string{}, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchProjectDomainAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + s.TearDownAndVerify(t, ``) + }) + t.Run("successful get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowExecutionConfigSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(workflowResp, nil) + err := getWorkflowExecutionConfigFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, "workflow", + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","max_parallelism":5}`) + }) + t.Run("failed get workflow attribute", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowExecutionConfigSetup() + args := []string{"workflow"} + s.FetcherExt.OnFetchWorkflowAttributesMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed to fetch response")) + err := getWorkflowExecutionConfigFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to fetch response"), err) + s.FetcherExt.AssertCalled(t, "FetchWorkflowAttributes", + s.Ctx, config.GetConfig().Project, 
config.GetConfig().Domain, "workflow", + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) + s.TearDownAndVerify(t, ``) + }) +} diff --git a/flytectl/cmd/get/node_execution.go b/flytectl/cmd/get/node_execution.go new file mode 100644 index 0000000000..bec0a5a9a0 --- /dev/null +++ b/flytectl/cmd/get/node_execution.go @@ -0,0 +1,277 @@ +package get + +import ( + "bytes" + "context" + "fmt" + "sort" + "strconv" + + "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/printer" + + "github.com/disiqueira/gotree" + "github.com/golang/protobuf/jsonpb" +) + +var nodeExecutionColumns = []printer.Column{ + {Header: "Name", JSONPath: "$.id.nodeID"}, + {Header: "Exec", JSONPath: "$.id.executionId.name"}, + {Header: "EndedAt", JSONPath: "$.endedAt"}, + {Header: "StartedAt", JSONPath: "$.startedAt"}, + {Header: "Phase", JSONPath: "$.phase"}, +} + +const ( + taskAttemptPrefix = "Attempt :" + taskExecPrefix = "Task - " + taskTypePrefix = "Task Type - " + taskReasonPrefix = "Reason - " + taskMetadataPrefix = "Metadata" + taskGeneratedNamePrefix = "Generated Name : " + taskPluginIDPrefix = "Plugin Identifier : " + taskExtResourcesPrefix = "External Resources" + taskExtResourcePrefix = "Ext Resource : " + taskExtResourceTokenPrefix = "Ext Resource Token : " //nolint + taskResourcePrefix = "Resource Pool Info" + taskLogsPrefix = "Logs :" + outputsPrefix = "Outputs :" + taskLogsNamePrefix = "Name :" + taskLogURIPrefix = "URI :" + hyphenPrefix = " - " +) + +// TaskExecution wrapper around admin.TaskExecution +type TaskExecution struct { + *admin.TaskExecution +} + +// MarshalJSON overridden method to json marshalling to use jsonpb +func (in *TaskExecution) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + marshaller := jsonpb.Marshaler{} + if err := 
marshaller.Marshal(&buf, in.TaskExecution); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON overridden method to json unmarshalling to use jsonpb +func (in *TaskExecution) UnmarshalJSON(b []byte) error { + in.TaskExecution = &admin.TaskExecution{} + return jsonpb.Unmarshal(bytes.NewReader(b), in.TaskExecution) +} + +type NodeExecution struct { + *admin.NodeExecution +} + +// MarshalJSON overridden method to json marshalling to use jsonpb +func (in *NodeExecution) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + marshaller := jsonpb.Marshaler{} + if err := marshaller.Marshal(&buf, in.NodeExecution); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON overridden method to json unmarshalling to use jsonpb; allocates the embedded proto first, mirroring TaskExecution.UnmarshalJSON +func (in *NodeExecution) UnmarshalJSON(b []byte) error { + in.NodeExecution = &admin.NodeExecution{} + return jsonpb.Unmarshal(bytes.NewReader(b), in.NodeExecution) +} + +// NodeExecutionClosure forms a wrapper around admin.NodeExecution and also fetches the childnodes , task execs +// and input/output on the node executions from the admin api's. 
+type NodeExecutionClosure struct { + NodeExec *NodeExecution `json:"node_exec,omitempty"` + ChildNodes []*NodeExecutionClosure `json:"child_nodes,omitempty"` + TaskExecutions []*TaskExecutionClosure `json:"task_execs,omitempty"` + // Inputs for the node + Inputs map[string]interface{} `json:"inputs,omitempty"` + // Outputs for the node + Outputs map[string]interface{} `json:"outputs,omitempty"` +} + +// TaskExecutionClosure wrapper around TaskExecution +type TaskExecutionClosure struct { + *TaskExecution +} + +func getExecutionDetails(ctx context.Context, project, domain, execName, nodeName string, cmdCtx cmdCore.CommandContext) ([]*NodeExecutionClosure, error) { + // Fetching Node execution details + nodeExecDetailsMap := map[string]*NodeExecutionClosure{} + nExecDetails, err := getNodeExecDetailsInt(ctx, project, domain, execName, nodeName, "", nodeExecDetailsMap, cmdCtx) + if err != nil { + return nil, err + } + + var nExecDetailsForView []*NodeExecutionClosure + // Get the execution details only for the nodeId passed + if len(nodeName) > 0 { + // Fetch the last one which contains the nodeId details as previous ones are used to reach the nodeId + if nodeExecDetailsMap[nodeName] != nil { + nExecDetailsForView = append(nExecDetailsForView, nodeExecDetailsMap[nodeName]) + } + } else { + nExecDetailsForView = nExecDetails + } + + sort.Slice(nExecDetailsForView[:], func(i, j int) bool { + return nExecDetailsForView[i].NodeExec.Closure.CreatedAt.AsTime().Before(nExecDetailsForView[j].NodeExec.Closure.CreatedAt.AsTime()) + }) + + return nExecDetailsForView, nil +} + +func getNodeExecDetailsInt(ctx context.Context, project, domain, execName, nodeName, uniqueParentID string, + nodeExecDetailsMap map[string]*NodeExecutionClosure, cmdCtx cmdCore.CommandContext) ([]*NodeExecutionClosure, error) { + + nExecDetails, err := cmdCtx.AdminFetcherExt().FetchNodeExecutionDetails(ctx, execName, project, domain, uniqueParentID) + if err != nil { + return nil, err + } + + var 
nodeExecClosures []*NodeExecutionClosure + for _, nodeExec := range nExecDetails.NodeExecutions { + nodeExecClosure := &NodeExecutionClosure{ + NodeExec: &NodeExecution{nodeExec}, + } + nodeExecClosures = append(nodeExecClosures, nodeExecClosure) + + // Check if this is parent node. If yes do recursive call to get child nodes. + if nodeExec.Metadata != nil && nodeExec.Metadata.IsParentNode { + nodeExecClosure.ChildNodes, err = getNodeExecDetailsInt(ctx, project, domain, execName, nodeName, nodeExec.Id.NodeId, nodeExecDetailsMap, cmdCtx) + if err != nil { + return nil, err + } + } else { + taskExecList, err := cmdCtx.AdminFetcherExt().FetchTaskExecutionsOnNode(ctx, + nodeExec.Id.NodeId, execName, project, domain) + if err != nil { + return nil, err + } + for _, taskExec := range taskExecList.TaskExecutions { + taskExecClosure := &TaskExecutionClosure{ + TaskExecution: &TaskExecution{taskExec}, + } + nodeExecClosure.TaskExecutions = append(nodeExecClosure.TaskExecutions, taskExecClosure) + } + // Fetch the node inputs and outputs + nExecDataResp, err := cmdCtx.AdminFetcherExt().FetchNodeExecutionData(ctx, nodeExec.Id.NodeId, execName, project, domain) + if err != nil { + return nil, err + } + // Extract the inputs from the literal map + nodeExecClosure.Inputs, err = extractLiteralMap(nExecDataResp.FullInputs) + if err != nil { + return nil, err + } + // Extract the outputs from the literal map + nodeExecClosure.Outputs, err = extractLiteralMap(nExecDataResp.FullOutputs) + if err != nil { + return nil, err + } + } + nodeExecDetailsMap[nodeExec.Id.NodeId] = nodeExecClosure + // Found the node + if len(nodeName) > 0 && nodeName == nodeExec.Id.NodeId { + return nodeExecClosures, err + } + } + return nodeExecClosures, nil +} + +func createNodeTaskExecTreeView(rootView gotree.Tree, taskExecClosures []*TaskExecutionClosure) { + if len(taskExecClosures) == 0 { + return + } + if rootView == nil { + rootView = gotree.New("") + } + // TODO: Replace this by filter to sort in the 
admin + sort.Slice(taskExecClosures[:], func(i, j int) bool { + return taskExecClosures[i].Id.RetryAttempt < taskExecClosures[j].Id.RetryAttempt + }) + for _, taskExecClosure := range taskExecClosures { + attemptView := rootView.Add(taskAttemptPrefix + strconv.Itoa(int(taskExecClosure.Id.RetryAttempt))) + attemptView.Add(taskExecPrefix + taskExecClosure.Closure.Phase.String() + + hyphenPrefix + taskExecClosure.Closure.CreatedAt.AsTime().String() + + hyphenPrefix + taskExecClosure.Closure.UpdatedAt.AsTime().String()) + attemptView.Add(taskTypePrefix + taskExecClosure.Closure.TaskType) + attemptView.Add(taskReasonPrefix + taskExecClosure.Closure.Reason) + if taskExecClosure.Closure.Metadata != nil { + metadata := attemptView.Add(taskMetadataPrefix) + metadata.Add(taskGeneratedNamePrefix + taskExecClosure.Closure.Metadata.GeneratedName) + metadata.Add(taskPluginIDPrefix + taskExecClosure.Closure.Metadata.PluginIdentifier) + extResourcesView := metadata.Add(taskExtResourcesPrefix) + for _, extResource := range taskExecClosure.Closure.Metadata.ExternalResources { + extResourcesView.Add(taskExtResourcePrefix + extResource.ExternalId) + } + resourcePoolInfoView := metadata.Add(taskResourcePrefix) + for _, rsPool := range taskExecClosure.Closure.Metadata.ResourcePoolInfo { + resourcePoolInfoView.Add(taskExtResourcePrefix + rsPool.Namespace) + resourcePoolInfoView.Add(taskExtResourceTokenPrefix + rsPool.AllocationToken) + } + } + + sort.Slice(taskExecClosure.Closure.Logs[:], func(i, j int) bool { + return taskExecClosure.Closure.Logs[i].Name < taskExecClosure.Closure.Logs[j].Name + }) + + logsView := attemptView.Add(taskLogsPrefix) + for _, logData := range taskExecClosure.Closure.Logs { + logsView.Add(taskLogsNamePrefix + logData.Name) + logsView.Add(taskLogURIPrefix + logData.Uri) + } + } +} + +func createNodeDetailsTreeView(rootView gotree.Tree, nodeExecutionClosures []*NodeExecutionClosure) gotree.Tree { + if rootView == nil { + rootView = gotree.New("") + } + if 
len(nodeExecutionClosures) == 0 { + return rootView + } + // TODO : Move to sorting using filters. + sort.Slice(nodeExecutionClosures[:], func(i, j int) bool { + return nodeExecutionClosures[i].NodeExec.Closure.CreatedAt.AsTime().Before(nodeExecutionClosures[j].NodeExec.Closure.CreatedAt.AsTime()) + }) + + for _, nodeExecWrapper := range nodeExecutionClosures { + nExecView := rootView.Add(nodeExecWrapper.NodeExec.Id.NodeId + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.Phase.String() + + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.CreatedAt.AsTime().String() + + hyphenPrefix + nodeExecWrapper.NodeExec.Closure.UpdatedAt.AsTime().String()) + if len(nodeExecWrapper.ChildNodes) > 0 { + createNodeDetailsTreeView(nExecView, nodeExecWrapper.ChildNodes) + } + createNodeTaskExecTreeView(nExecView, nodeExecWrapper.TaskExecutions) + if len(nodeExecWrapper.Outputs) > 0 { + outputsView := nExecView.Add(outputsPrefix) + for outputKey, outputVal := range nodeExecWrapper.Outputs { + outputsView.Add(fmt.Sprintf("%s: %v", outputKey, outputVal)) + } + } + } + return rootView +} + +func extractLiteralMap(literalMap *core.LiteralMap) (map[string]interface{}, error) { + m := make(map[string]interface{}) + if literalMap == nil || literalMap.Literals == nil { + return m, nil + } + for key, literalVal := range literalMap.Literals { + extractedLiteralVal, err := coreutils.ExtractFromLiteral(literalVal) + if err != nil { + return nil, err + } + m[key] = extractedLiteralVal + } + return m, nil +} diff --git a/flytectl/cmd/get/node_execution_test.go b/flytectl/cmd/get/node_execution_test.go new file mode 100644 index 0000000000..ca12baa41f --- /dev/null +++ b/flytectl/cmd/get/node_execution_test.go @@ -0,0 +1,325 @@ +package get + +import ( + "fmt" + "testing" + "time" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + 
"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event" + + "github.com/disiqueira/gotree" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + dummyProject = "dummyProject" + dummyDomain = "dummyDomain" + dummyExec = "dummyExec" +) + +func TestCreateNodeDetailsTreeView(t *testing.T) { + + t.Run("empty node execution", func(t *testing.T) { + expectedRoot := gotree.New("") + treeRoot := createNodeDetailsTreeView(nil, nil) + assert.Equal(t, expectedRoot, treeRoot) + }) + + t.Run("successful simple node execution full view", func(t *testing.T) { + + nodeExec1 := createDummyNodeWithID("start-node", false) + nodeExec1Closure := NodeExecutionClosure{NodeExec: &NodeExecution{nodeExec1}} + taskExec11 := createDummyTaskExecutionForNode("start-node", "task11") + taskExec11Closure := TaskExecutionClosure{&TaskExecution{taskExec11}} + taskExec12 := createDummyTaskExecutionForNode("start-node", "task12") + taskExec12Closure := TaskExecutionClosure{&TaskExecution{taskExec12}} + + nodeExec1Closure.TaskExecutions = []*TaskExecutionClosure{&taskExec11Closure, &taskExec12Closure} + + nodeExec2 := createDummyNodeWithID("n0", false) + nodeExec2Closure := NodeExecutionClosure{NodeExec: &NodeExecution{nodeExec2}} + taskExec21 := createDummyTaskExecutionForNode("n0", "task21") + taskExec21Closure := TaskExecutionClosure{&TaskExecution{taskExec21}} + taskExec22 := createDummyTaskExecutionForNode("n0", "task22") + taskExec22Closure := TaskExecutionClosure{&TaskExecution{taskExec22}} + + nodeExec2Closure.TaskExecutions = []*TaskExecutionClosure{&taskExec21Closure, &taskExec22Closure} + + wrapperNodeExecutions := []*NodeExecutionClosure{&nodeExec1Closure, &nodeExec2Closure} + + treeRoot := createNodeDetailsTreeView(nil, wrapperNodeExecutions) + + assert.Equal(t, 2, len(treeRoot.Items())) + }) +} + +func createDummyNodeWithID(nodeID string, 
isParentNode bool) *admin.NodeExecution { + nodeExecution := &admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + NodeId: nodeID, + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: dummyProject, + Domain: dummyDomain, + Name: dummyExec, + }, + }, + InputUri: nodeID + "inputUri", + Metadata: &admin.NodeExecutionMetaData{ + IsParentNode: isParentNode, + }, + Closure: &admin.NodeExecutionClosure{ + OutputResult: &admin.NodeExecutionClosure_OutputUri{ + OutputUri: nodeID + "outputUri", + }, + Phase: core.NodeExecution_SUCCEEDED, + StartedAt: timestamppb.Now(), + Duration: &durationpb.Duration{Seconds: 100}, + CreatedAt: timestamppb.Now(), + UpdatedAt: timestamppb.Now(), + TargetMetadata: &admin.NodeExecutionClosure_WorkflowNodeMetadata{ + WorkflowNodeMetadata: &admin.WorkflowNodeMetadata{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: dummyProject, + Domain: dummyDomain, + Name: dummyExec, + }, + }, + }, + }, + } + return nodeExecution +} + +func createDummyTaskExecutionForNode(nodeID string, taskID string) *admin.TaskExecution { + taskLog1 := &core.TaskLog{ + Uri: nodeID + taskID + "logUri1", + Name: nodeID + taskID + "logName1", + MessageFormat: core.TaskLog_JSON, + Ttl: &durationpb.Duration{Seconds: 100}, + } + + taskLog2 := &core.TaskLog{ + Uri: nodeID + taskID + "logUri2", + Name: nodeID + taskID + "logName2", + MessageFormat: core.TaskLog_JSON, + Ttl: &durationpb.Duration{Seconds: 100}, + } + + taskLogs := []*core.TaskLog{taskLog1, taskLog2} + + extResourceInfo := &event.ExternalResourceInfo{ + ExternalId: nodeID + taskID + "externalId", + } + extResourceInfos := []*event.ExternalResourceInfo{extResourceInfo} + + resourcePoolInfo := &event.ResourcePoolInfo{ + AllocationToken: nodeID + taskID + "allocationToken", + Namespace: nodeID + taskID + "namespace", + } + resourcePoolInfos := []*event.ResourcePoolInfo{resourcePoolInfo} + + taskExec := &admin.TaskExecution{ + Id: &core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + 
Project: dummyProject, + Domain: dummyDomain, + Name: dummyExec, + ResourceType: core.ResourceType_TASK, + }, + }, + InputUri: nodeID + taskID + "inputUrlForTask", + Closure: &admin.TaskExecutionClosure{ + OutputResult: &admin.TaskExecutionClosure_OutputUri{ + OutputUri: nodeID + taskID + "outputUri-task", + }, + Phase: core.TaskExecution_SUCCEEDED, + Logs: taskLogs, + StartedAt: timestamppb.Now(), + Duration: &durationpb.Duration{Seconds: 100}, + CreatedAt: timestamppb.Now(), + UpdatedAt: timestamppb.New(time.Now()), + Reason: nodeID + taskID + "reason", + TaskType: nodeID + taskID + "taskType", + Metadata: &event.TaskExecutionMetadata{ + GeneratedName: nodeID + taskID + "generatedName", + ExternalResources: extResourceInfos, + ResourcePoolInfo: resourcePoolInfos, + PluginIdentifier: nodeID + taskID + "pluginId", + }, + }, + } + return taskExec +} + +func TestGetExecutionDetails(t *testing.T) { + t.Run("successful get details default view", func(t *testing.T) { + s := testutils.SetupWithExt() + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + + nodeExecStart := createDummyNodeWithID("start-node", false) + nodeExecN2 := createDummyNodeWithID("n2", true) + nodeExec1 := createDummyNodeWithID("n0", false) + taskExec1 := createDummyTaskExecutionForNode("n0", "task21") + taskExec2 := createDummyTaskExecutionForNode("n0", "task22") + + nodeExecutions := []*admin.NodeExecution{nodeExecStart, nodeExecN2, nodeExec1} + nodeExecList := &admin.NodeExecutionList{NodeExecutions: nodeExecutions} + + inputs := map[string]*core.Literal{ + "val1": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 100, + }, + }, + }, + }, + }, + }, + } + outputs := map[string]*core.Literal{ + "o2": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: 
&core.Primitive_Integer{ + Integer: 120, + }, + }, + }, + }, + }, + }, + } + dataResp := &admin.NodeExecutionGetDataResponse{ + FullOutputs: &core.LiteralMap{ + Literals: inputs, + }, + FullInputs: &core.LiteralMap{ + Literals: outputs, + }, + } + + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nodeExecList, nil) + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "n2").Return(&admin.NodeExecutionList{}, nil) + mockFetcherExt.OnFetchTaskExecutionsOnNodeMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(&admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + }, nil) + mockFetcherExt.OnFetchNodeExecutionDataMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(dataResp, nil) + + nodeExecWrappers, err := getExecutionDetails(ctx, dummyProject, dummyDomain, dummyExec, "", mockCmdCtx) + assert.Nil(t, err) + assert.NotNil(t, nodeExecWrappers) + }) + + t.Run("successful get details default view for node-id", func(t *testing.T) { + s := testutils.SetupWithExt() + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + + nodeExec1 := createDummyNodeWithID("n0", false) + taskExec1 := createDummyTaskExecutionForNode("n0", "task21") + taskExec2 := createDummyTaskExecutionForNode("n0", "task22") + + nodeExecutions := []*admin.NodeExecution{nodeExec1} + nodeExecList := &admin.NodeExecutionList{NodeExecutions: nodeExecutions} + + inputs := map[string]*core.Literal{ + "val1": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 100, + }, + }, + }, + }, + }, + }, + } + outputs := map[string]*core.Literal{ + "o2": &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ 
+ Integer: 120, + }, + }, + }, + }, + }, + }, + } + dataResp := &admin.NodeExecutionGetDataResponse{ + FullOutputs: &core.LiteralMap{ + Literals: inputs, + }, + FullInputs: &core.LiteralMap{ + Literals: outputs, + }, + } + + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nodeExecList, nil) + mockFetcherExt.OnFetchTaskExecutionsOnNodeMatch(ctx, "n0", dummyExec, dummyProject, dummyDomain).Return(&admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + }, nil) + mockFetcherExt.OnFetchNodeExecutionDataMatch(ctx, mock.Anything, dummyExec, dummyProject, dummyDomain).Return(dataResp, nil) + + nodeExecWrappers, err := getExecutionDetails(ctx, dummyProject, dummyDomain, dummyExec, "n0", mockCmdCtx) + assert.Nil(t, err) + assert.NotNil(t, nodeExecWrappers) + }) + + t.Run("failure task exec fetch", func(t *testing.T) { + s := testutils.SetupWithExt() + ctx := s.Ctx + mockCmdCtx := s.CmdCtx + mockFetcherExt := s.FetcherExt + nodeExecToTaskExec := map[string]*admin.TaskExecutionList{} + + nodeExec1 := createDummyNodeWithID("n0", false) + taskExec1 := createDummyTaskExecutionForNode("n0", "task21") + taskExec2 := createDummyTaskExecutionForNode("n0", "task22") + + nodeExecToTaskExec["n0"] = &admin.TaskExecutionList{ + TaskExecutions: []*admin.TaskExecution{taskExec1, taskExec2}, + } + + nodeExecutions := []*admin.NodeExecution{nodeExec1} + nodeExecList := &admin.NodeExecutionList{NodeExecutions: nodeExecutions} + + mockFetcherExt.OnFetchNodeExecutionDetailsMatch(ctx, dummyExec, dummyProject, dummyDomain, "").Return(nodeExecList, nil) + mockFetcherExt.OnFetchTaskExecutionsOnNodeMatch(ctx, "n0", dummyExec, dummyProject, dummyDomain).Return(nil, fmt.Errorf("unable to fetch task exec details")) + _, err := getExecutionDetails(ctx, dummyProject, dummyDomain, dummyExec, "", mockCmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to fetch task exec details"), err) + }) +} + +func 
TestExtractLiteralMapError(t *testing.T) { + literalMap, err := extractLiteralMap(nil) + assert.Nil(t, err) + assert.Equal(t, len(literalMap), 0) + + literalMap, err = extractLiteralMap(&core.LiteralMap{}) + assert.Nil(t, err) + assert.Equal(t, len(literalMap), 0) +} diff --git a/flytectl/cmd/get/project.go b/flytectl/cmd/get/project.go new file mode 100644 index 0000000000..bf4b1473b9 --- /dev/null +++ b/flytectl/cmd/get/project.go @@ -0,0 +1,104 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/golang/protobuf/proto" + + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/printer" +) + +const ( + projectShort = "Gets project resources" + projectLong = ` +Retrieve all the projects: +:: + + flytectl get project + +.. note:: + The terms project/projects are interchangeable in these commands. 
+ +Retrieve project by name: + +:: + + flytectl get project flytesnacks + +Retrieve all the projects with filters: +:: + + flytectl get project --filter.fieldSelector="project.name=flytesnacks" + +Retrieve all the projects with limit and sorting: +:: + + flytectl get project --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve projects present in other pages by specifying the limit and page number: +:: + + flytectl get project --filter.limit=10 --filter.page=2 + +Retrieve all the projects in yaml format: + +:: + + flytectl get project -o yaml + +Retrieve all the projects in json format: + +:: + + flytectl get project -o json + +Usage +` +) + +var projectColumns = []printer.Column{ + {Header: "ID", JSONPath: "$.id"}, + {Header: "Name", JSONPath: "$.name"}, + {Header: "Description", JSONPath: "$.description"}, +} + +func ProjectToProtoMessages(l []*admin.Project) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func getProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + adminPrinter := printer.Printer{} + + projects, err := cmdCtx.AdminFetcherExt().ListProjects(ctx, project.DefaultConfig.Filter) + if err != nil { + return err + } + + if len(args) == 1 { + id := args[0] + logger.Debugf(ctx, "Retrieved %v projects", len(projects.Projects)) + for _, v := range projects.Projects { + if v.Id == id { + err := adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, v) + if err != nil { + return err + } + return nil + } + } + return nil + } + + logger.Debugf(ctx, "Retrieved %v projects", len(projects.Projects)) + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), projectColumns, ProjectToProtoMessages(projects.Projects)...) 
+} diff --git a/flytectl/cmd/get/project_test.go b/flytectl/cmd/get/project_test.go new file mode 100644 index 0000000000..7efed267e2 --- /dev/null +++ b/flytectl/cmd/get/project_test.go @@ -0,0 +1,92 @@ +package get + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/pkg/filters" + "github.com/stretchr/testify/assert" +) + +var ( + resourceListRequestProject *admin.ProjectListRequest + projectListResponse *admin.Projects + argsProject = []string{"flyteexample"} + project1 *admin.Project +) + +func getProjectSetup() { + resourceListRequestProject = &admin.ProjectListRequest{} + + project1 = &admin.Project{ + Id: "flyteexample", + Name: "flyteexample", + Domains: []*admin.Domain{ + { + Id: "development", + Name: "development", + }, + }, + } + + project2 := &admin.Project{ + Id: "flytesnacks", + Name: "flytesnacks", + Domains: []*admin.Domain{ + { + Id: "development", + Name: "development", + }, + }, + } + + projects := []*admin.Project{project1, project2} + + projectListResponse = &admin.Projects{ + Projects: projects, + } +} + +func TestListProjectFunc(t *testing.T) { + s := testutils.SetupWithExt() + getProjectSetup() + project.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListProjectsMatch(s.Ctx, resourceListRequestProject).Return(projectListResponse, nil) + s.FetcherExt.OnListProjects(s.Ctx, filters.Filters{}).Return(projectListResponse, nil) + err := getProjectsFunc(s.Ctx, argsProject, s.CmdCtx) + + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "ListProjects", s.Ctx, filters.Filters{}) +} + +func TestGetProjectFunc(t *testing.T) { + s := testutils.SetupWithExt() + getProjectSetup() + argsProject = []string{} + + project.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListProjectsMatch(s.Ctx, 
resourceListRequestProject).Return(projectListResponse, nil) + s.FetcherExt.OnListProjects(s.Ctx, filters.Filters{}).Return(projectListResponse, nil) + err := getProjectsFunc(s.Ctx, argsProject, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "ListProjects", s.Ctx, filters.Filters{}) +} + +func TestGetProjectFuncError(t *testing.T) { + s := testutils.SetupWithExt() + getProjectSetup() + project.DefaultConfig.Filter = filters.Filters{ + FieldSelector: "hello=", + } + s.MockAdminClient.OnListProjectsMatch(s.Ctx, resourceListRequestProject).Return(nil, fmt.Errorf("Please add a valid field selector")) + s.FetcherExt.OnListProjects(s.Ctx, filters.Filters{ + FieldSelector: "hello=", + }).Return(nil, fmt.Errorf("Please add a valid field selector")) + err := getProjectsFunc(s.Ctx, argsProject, s.CmdCtx) + assert.NotNil(t, err) +} diff --git a/flytectl/cmd/get/task.go b/flytectl/cmd/get/task.go new file mode 100644 index 0000000000..b5e1b13d8b --- /dev/null +++ b/flytectl/cmd/get/task.go @@ -0,0 +1,204 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flytectl/cmd/config" + taskConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/task" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/ext" + "github.com/flyteorg/flytectl/pkg/printer" + "github.com/golang/protobuf/proto" +) + +const ( + taskShort = "Gets task resources" + taskLong = ` + +Retrieve all the tasks within project and domain: +:: + + flytectl get task -p flytesnacks -d development + +.. note:: + The terms task/tasks are interchangeable in these commands. 
+
+Retrieve task by name within project and domain:
+
+::
+
+ flytectl get task -p flytesnacks -d development core.basic.lp.greet
+
+Retrieve latest version of task by name within project and domain:
+
+::
+
+ flytectl get task -p flytesnacks -d development core.basic.lp.greet --latest
+
+Retrieve particular version of task by name within project and domain:
+
+::
+
+ flytectl get task -p flytesnacks -d development core.basic.lp.greet --version v2
+
+Retrieve all the tasks with filters:
+::
+
+ flytectl get task -p flytesnacks -d development --filter.fieldSelector="task.name=k8s_spark.pyspark_pi.print_every_time,task.version=v1"
+
+Retrieve a specific task with filters:
+::
+
+ flytectl get task -p flytesnacks -d development k8s_spark.pyspark_pi.print_every_time --filter.fieldSelector="task.version=v1,created_at>=2021-05-24T21:43:12.325335Z"
+
+Retrieve all the tasks with limit and sorting:
+::
+
+ flytectl get -p flytesnacks -d development task --filter.sortBy=created_at --filter.limit=1 --filter.asc
+
+Retrieve tasks present in other pages by specifying the limit and page number:
+::
+
+ flytectl get -p flytesnacks -d development task --filter.limit=10 --filter.page=2
+
+Retrieve all the tasks within project and domain in yaml format:
+::
+
+ flytectl get task -p flytesnacks -d development -o yaml
+
+Retrieve all the tasks within project and domain in json format:
+
+::
+
+ flytectl get task -p flytesnacks -d development -o json
+
+Retrieve tasks within project and domain for a version and generate the execution spec file for it to be used for launching the execution using create execution:
+
+::
+
+ flytectl get tasks -d development -p flytesnacks core.control_flow.merge_sort.merge --execFile execution_spec.yaml --version v2
+
+The generated file would look similar to this:
+
+..
code-block:: yaml + + iamRoleARN: "" + inputs: + sorted_list1: + - 0 + sorted_list2: + - 0 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: v2 + +Check the create execution section on how to launch one using the generated file. + +Usage +` +) + +var taskColumns = []printer.Column{ + {Header: "Version", JSONPath: "$.id.version"}, + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Type", JSONPath: "$.closure.compiledTask.template.type"}, + {Header: "Inputs", JSONPath: "$.closure.compiledTask.template.interface.inputs.variables." + printer.DefaultFormattedDescriptionsKey + ".description"}, + {Header: "Outputs", JSONPath: "$.closure.compiledTask.template.interface.outputs.variables." + printer.DefaultFormattedDescriptionsKey + ".description"}, + {Header: "Discoverable", JSONPath: "$.closure.compiledTask.template.metadata.discoverable"}, + {Header: "Discovery Version", JSONPath: "$.closure.compiledTask.template.metadata.discoveryVersion"}, + {Header: "Created At", JSONPath: "$.closure.createdAt"}, +} + +func TaskToProtoMessages(l []*admin.Task) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func TaskToTableProtoMessages(l []*admin.Task) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + m := proto.Clone(m).(*admin.Task) + if m.Closure != nil && m.Closure.CompiledTask != nil { + if m.Closure.CompiledTask.Template != nil { + if m.Closure.CompiledTask.Template.Interface != nil { + if m.Closure.CompiledTask.Template.Interface.Inputs != nil && m.Closure.CompiledTask.Template.Interface.Inputs.Variables != nil { + printer.FormatVariableDescriptions(m.Closure.CompiledTask.Template.Interface.Inputs.Variables) + } + if m.Closure.CompiledTask.Template.Interface.Outputs != nil && m.Closure.CompiledTask.Template.Interface.Outputs.Variables != nil { + 
printer.FormatVariableDescriptions(m.Closure.CompiledTask.Template.Interface.Outputs.Variables) + } + } + } + } + messages = append(messages, m) + } + return messages +} + +func getTaskFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + taskPrinter := printer.Printer{} + var tasks []*admin.Task + var err error + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) == 1 { + name := args[0] + if tasks, err = FetchTaskForName(ctx, cmdCtx.AdminFetcherExt(), name, project, domain); err != nil { + return err + } + logger.Debugf(ctx, "Retrieved Task", tasks) + if config.GetConfig().MustOutputFormat() == printer.OutputFormatTABLE { + return taskPrinter.Print(config.GetConfig().MustOutputFormat(), taskColumns, TaskToTableProtoMessages(tasks)...) + } + return taskPrinter.Print(config.GetConfig().MustOutputFormat(), taskColumns, TaskToProtoMessages(tasks)...) + + } + tasks, err = cmdCtx.AdminFetcherExt().FetchAllVerOfTask(ctx, "", config.GetConfig().Project, config.GetConfig().Domain, taskConfig.DefaultConfig.Filter) + if err != nil { + return err + } + logger.Debugf(ctx, "Retrieved %v Task", len(tasks)) + if config.GetConfig().MustOutputFormat() == printer.OutputFormatTABLE { + return taskPrinter.Print(config.GetConfig().MustOutputFormat(), taskColumns, TaskToTableProtoMessages(tasks)...) + } + return taskPrinter.Print(config.GetConfig().MustOutputFormat(), taskColumns, TaskToProtoMessages(tasks)...) +} + +// FetchTaskForName Reads the task config to drive fetching the correct tasks. 
+func FetchTaskForName(ctx context.Context, fetcher ext.AdminFetcherExtInterface, name, project, domain string) ([]*admin.Task, error) { + var tasks []*admin.Task + var err error + var task *admin.Task + if taskConfig.DefaultConfig.Latest { + if task, err = fetcher.FetchTaskLatestVersion(ctx, name, project, domain, taskConfig.DefaultConfig.Filter); err != nil { + return nil, err + } + tasks = append(tasks, task) + } else if taskConfig.DefaultConfig.Version != "" { + if task, err = fetcher.FetchTaskVersion(ctx, name, taskConfig.DefaultConfig.Version, project, domain); err != nil { + return nil, err + } + tasks = append(tasks, task) + } else { + tasks, err = fetcher.FetchAllVerOfTask(ctx, name, project, domain, taskConfig.DefaultConfig.Filter) + if err != nil { + return nil, err + } + } + if taskConfig.DefaultConfig.ExecFile != "" { + // There would be atleast one task object when code reaches here and hence the length assertion is not required. + task = tasks[0] + // Only write the first task from the tasks object. 
+ if err = CreateAndWriteExecConfigForTask(task, taskConfig.DefaultConfig.ExecFile); err != nil { + return nil, err + } + } + return tasks, nil +} diff --git a/flytectl/cmd/get/task_test.go b/flytectl/cmd/get/task_test.go new file mode 100644 index 0000000000..2429f38171 --- /dev/null +++ b/flytectl/cmd/get/task_test.go @@ -0,0 +1,532 @@ +package get + +import ( + "fmt" + "os" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flytectl/cmd/config" + + taskConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/task" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/pkg/ext/mocks" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + resourceListRequestTask *admin.ResourceListRequest + resourceListFilterRequestTask *admin.ResourceListRequest + resourceListTaskRequest *admin.ResourceListRequest + resourceListLimitRequestTask *admin.ResourceListRequest + objectGetRequestTask *admin.ObjectGetRequest + namedIDRequestTask *admin.NamedEntityIdentifierListRequest + taskListResponse *admin.TaskList + taskListFilterResponse *admin.TaskList + argsTask []string + namedIdentifierListTask *admin.NamedEntityIdentifierList + task2 *admin.Task +) + +func getTaskSetup() { + argsTask = []string{"task1"} + sortedListLiteralType := core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var description", + } + variableMap := map[string]*core.Variable{ + "sorted_list1": &sortedListLiteralType, + "sorted_list2": &sortedListLiteralType, + } + + task1 := &admin.Task{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + 
Closure: &admin.TaskClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + CompiledTask: &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + } + + task2 = &admin.Task{ + Id: &core.Identifier{ + Name: "task1", + Version: "v2", + }, + Closure: &admin.TaskClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledTask: &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + } + + tasks := []*admin.Task{task2, task1} + resourceListLimitRequestTask = &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: argsTask[0], + }, + Limit: 100, + } + resourceListRequestTask = &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: argsTask[0], + }, + } + + resourceListTaskRequest = &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + }, + } + + resourceListFilterRequestTask = &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: argsTask[0], + }, + Filters: "eq(task.name,task1)+eq(task.version,v1)", + } + + taskListResponse = &admin.TaskList{ + Tasks: tasks, + } + taskListFilterResponse = &admin.TaskList{ + Tasks: []*admin.Task{task1}, + } + objectGetRequestTask = &admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: projectValue, + Domain: domainValue, + Name: argsTask[0], + Version: "v2", + }, + } + namedIDRequestTask = &admin.NamedEntityIdentifierListRequest{ + Project: projectValue, + Domain: domainValue, + SortBy: &admin.Sort{ + Key: "name", + Direction: admin.Sort_ASCENDING, + }, + Limit: 100, + } + + var taskEntities 
[]*admin.NamedEntityIdentifier + idTask1 := &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: "task1", + } + idTask2 := &admin.NamedEntityIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: "task2", + } + taskEntities = append(taskEntities, idTask1, idTask2) + namedIdentifierListTask = &admin.NamedEntityIdentifierList{ + Entities: taskEntities, + } + + taskConfig.DefaultConfig.Latest = false + taskConfig.DefaultConfig.ExecFile = "" + taskConfig.DefaultConfig.Version = "" + taskConfig.DefaultConfig.Filter = filters.DefaultFilter +} + +func TestGetTaskFuncWithError(t *testing.T) { + t.Run("failure fetch latest", func(t *testing.T) { + s := setup() + getTaskSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + taskConfig.DefaultConfig.Latest = true + taskConfig.DefaultConfig.Filter = filters.Filters{} + mockFetcher.OnFetchTaskLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching latest version")) + _, err := FetchTaskForName(s.Ctx, mockFetcher, "lpName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching version ", func(t *testing.T) { + s := setup() + getTaskSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + taskConfig.DefaultConfig.Version = "v1" + taskConfig.DefaultConfig.Filter = filters.Filters{} + mockFetcher.OnFetchTaskVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching version")) + _, err := FetchTaskForName(s.Ctx, mockFetcher, "lpName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching all version ", func(t *testing.T) { + s := setup() + getTaskSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + taskConfig.DefaultConfig.Filter = filters.Filters{} + mockFetcher.OnFetchAllVerOfTaskMatch(mock.Anything, mock.Anything, mock.Anything, + 
mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching all version")) + _, err := FetchTaskForName(s.Ctx, mockFetcher, "lpName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching ", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(nil, fmt.Errorf("error fetching all version")) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(nil, fmt.Errorf("error fetching task")) + s.MockAdminClient.OnListTaskIdsMatch(s.Ctx, namedIDRequestTask).Return(nil, fmt.Errorf("error listing task ids")) + s.FetcherExt.OnFetchAllVerOfTaskMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching all version")) + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + assert.NotNil(t, err) + }) + + t.Run("failure fetching list task", func(t *testing.T) { + s := setup() + getLaunchPlanSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{} + argsTask = []string{} + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListTaskRequest).Return(nil, fmt.Errorf("error fetching all version")) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(nil, fmt.Errorf("error fetching task")) + s.MockAdminClient.OnListTaskIdsMatch(s.Ctx, namedIDRequestTask).Return(nil, fmt.Errorf("error listing task ids")) + s.FetcherExt.OnFetchAllVerOfTaskMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching all version")) + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + assert.NotNil(t, err) + }) +} + +func TestGetTaskFunc(t *testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) + 
s.FetcherExt.OnFetchAllVerOfTaskMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(taskListResponse.Tasks, nil) + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchAllVerOfTask", s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}) + s.TearDownAndVerify(t, `[ + { + "id": { + "name": "task1", + "version": "v2" + }, + "closure": { + "compiledTask": { + "template": { + "interface": { + "inputs": { + "variables": { + "sorted_list1": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + }, + "sorted_list2": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + } + } + } + } + } + }, + "createdAt": "1970-01-01T00:00:01Z" + } + }, + { + "id": { + "name": "task1", + "version": "v1" + }, + "closure": { + "compiledTask": { + "template": { + "interface": { + "inputs": { + "variables": { + "sorted_list1": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + }, + "sorted_list2": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + } + } + } + } + } + }, + "createdAt": "1970-01-01T00:00:00Z" + } + } +]`) +} + +func TestGetTaskFuncWithTable(t *testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) + s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.Tasks, nil) + config.GetConfig().Output = "table" + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchAllVerOfTask", s.Ctx, "task1", "dummyProject", "dummyDomain", 
filters.Filters{}) + s.TearDownAndVerify(t, ` +--------- ------- ------ --------------------------- --------- -------------- ------------------- ---------------------- +| VERSION | NAME | TYPE | INPUTS | OUTPUTS | DISCOVERABLE | DISCOVERY VERSION | CREATED AT | +--------- ------- ------ --------------------------- --------- -------------- ------------------- ---------------------- +| v2 | task1 | | sorted_list1: var desc... | | | | 1970-01-01T00:00:01Z | +| | | | sorted_list2: var desc... | | | | | +--------- ------- ------ --------------------------- --------- -------------- ------------------- ---------------------- +| v1 | task1 | | sorted_list1: var desc... | | | | 1970-01-01T00:00:00Z | +| | | | sorted_list2: var desc... | | | | | +--------- ------- ------ --------------------------- --------- -------------- ------------------- ---------------------- +2 rows`) +} + +func TestGetTaskFuncLatest(t *testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) + s.MockAdminClient.OnListTaskIdsMatch(s.Ctx, namedIDRequestTask).Return(namedIdentifierListTask, nil) + s.FetcherExt.OnFetchTaskLatestVersion(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(task2, nil) + taskConfig.DefaultConfig.Latest = true + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchTaskLatestVersion", s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}) + s.TearDownAndVerify(t, `{ + "id": { + "name": "task1", + "version": "v2" + }, + "closure": { + "compiledTask": { + "template": { + "interface": { + "inputs": { + "variables": { + "sorted_list1": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + }, + "sorted_list2": { + 
"type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + } + } + } + } + } + }, + "createdAt": "1970-01-01T00:00:01Z" + } +}`) +} + +func TestGetTaskWithVersion(t *testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) + s.MockAdminClient.OnListTaskIdsMatch(s.Ctx, namedIDRequestTask).Return(namedIdentifierListTask, nil) + s.FetcherExt.OnFetchTaskVersion(s.Ctx, "task1", "v2", "dummyProject", "dummyDomain").Return(task2, nil) + taskConfig.DefaultConfig.Version = "v2" + objectGetRequestTask.Id.ResourceType = core.ResourceType_TASK + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchTaskVersion", s.Ctx, "task1", "v2", "dummyProject", "dummyDomain") + s.TearDownAndVerify(t, `{ + "id": { + "name": "task1", + "version": "v2" + }, + "closure": { + "compiledTask": { + "template": { + "interface": { + "inputs": { + "variables": { + "sorted_list1": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + }, + "sorted_list2": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + } + } + } + } + } + }, + "createdAt": "1970-01-01T00:00:01Z" + } +}`) +} + +func TestGetTasks(t *testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{} + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) + s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{}).Return(taskListResponse.Tasks, nil) + + err := getTaskFunc(s.Ctx, argsTask, 
s.CmdCtx) + assert.Nil(t, err) + s.TearDownAndVerify(t, `[{"id": {"name": "task1","version": "v2"},"closure": {"compiledTask": {"template": {"interface": {"inputs": {"variables": {"sorted_list1": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "var description"},"sorted_list2": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "var description"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "task1","version": "v1"},"closure": {"compiledTask": {"template": {"interface": {"inputs": {"variables": {"sorted_list1": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "var description"},"sorted_list2": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "var description"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) +} + +func TestGetTasksFilters(t *testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + taskConfig.DefaultConfig.Filter = filters.Filters{ + FieldSelector: "task.name=task1,task.version=v1", + } + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListFilterRequestTask).Return(taskListFilterResponse, nil) + filteredTasks := []*admin.Task{} + for _, task := range taskListResponse.Tasks { + if task.Id.Name == "task1" && task.Id.Version == "v1" { + filteredTasks = append(filteredTasks, task) + } + } + s.FetcherExt.OnFetchAllVerOfTask(s.Ctx, "task1", "dummyProject", "dummyDomain", filters.Filters{ + FieldSelector: "task.name=task1,task.version=v1", + }).Return(filteredTasks, nil) + + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + + assert.Nil(t, err) + s.TearDownAndVerify(t, `{"id": {"name": "task1","version": "v1"},"closure": {"compiledTask": {"template": {"interface": {"inputs": {"variables": {"sorted_list1": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "var description"},"sorted_list2": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "var description"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}`) +} + +func TestGetTaskWithExecFile(t 
*testing.T) { + s := testutils.SetupWithExt() + getTaskSetup() + s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) + s.MockAdminClient.OnGetTaskMatch(s.Ctx, objectGetRequestTask).Return(task2, nil) + s.MockAdminClient.OnListTaskIdsMatch(s.Ctx, namedIDRequestTask).Return(namedIdentifierListTask, nil) + s.FetcherExt.OnFetchTaskVersion(s.Ctx, "task1", "v2", "dummyProject", "dummyDomain").Return(task2, nil) + taskConfig.DefaultConfig.Version = "v2" + taskConfig.DefaultConfig.ExecFile = testDataFolder + "task_exec_file" + err := getTaskFunc(s.Ctx, argsTask, s.CmdCtx) + os.Remove(taskConfig.DefaultConfig.ExecFile) + assert.Nil(t, err) + s.FetcherExt.AssertCalled(t, "FetchTaskVersion", s.Ctx, "task1", "v2", "dummyProject", "dummyDomain") + s.TearDownAndVerify(t, `{ + "id": { + "name": "task1", + "version": "v2" + }, + "closure": { + "compiledTask": { + "template": { + "interface": { + "inputs": { + "variables": { + "sorted_list1": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + }, + "sorted_list2": { + "type": { + "collectionType": { + "simple": "INTEGER" + } + }, + "description": "var description" + } + } + } + } + } + }, + "createdAt": "1970-01-01T00:00:01Z" + } +}`) +} diff --git a/flytectl/cmd/get/workflow.go b/flytectl/cmd/get/workflow.go new file mode 100644 index 0000000000..d59437a3be --- /dev/null +++ b/flytectl/cmd/get/workflow.go @@ -0,0 +1,205 @@ +package get + +import ( + "context" + + "github.com/flyteorg/flyte/flytestdlib/logger" + workflowconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/workflow" + "github.com/flyteorg/flytectl/pkg/ext" + "github.com/golang/protobuf/proto" + + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/printer" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +const ( + workflowShort = "Gets workflow resources" + 
workflowLong = `
+Retrieve all the workflows within project and domain (workflow/workflows can be used interchangeably in these commands):
+::
+
+ flytectl get workflow -p flytesnacks -d development
+
+Retrieve all versions of a workflow by name within project and domain:
+
+::
+
+ flytectl get workflow -p flytesnacks -d development core.basic.lp.go_greet
+
+Retrieve latest version of workflow by name within project and domain:
+
+::
+
+ flytectl get workflow -p flytesnacks -d development core.basic.lp.go_greet --latest
+
+Retrieve particular version of workflow by name within project and domain:
+
+::
+
+ flytectl get workflow -p flytesnacks -d development core.basic.lp.go_greet --version v2
+
+Retrieve all the workflows with filters:
+::
+
+ flytectl get workflow -p flytesnacks -d development --filter.fieldSelector="workflow.name=k8s_spark.dataframe_passing.my_smart_schema"
+
+Retrieve specific workflow with filters:
+::
+
+ flytectl get workflow -p flytesnacks -d development k8s_spark.dataframe_passing.my_smart_schema --filter.fieldSelector="workflow.version=v1"
+
+Retrieve all the workflows with limit and sorting:
+::
+
+ flytectl get -p flytesnacks -d development workflow --filter.sortBy=created_at --filter.limit=1 --filter.asc
+
+Retrieve workflows present in other pages by specifying the limit and page number:
+::
+
+ flytectl get -p flytesnacks -d development workflow --filter.limit=10 --filter.page=2
+
+Retrieve all the workflows within project and domain in yaml format:
+
+::
+
+ flytectl get workflow -p flytesnacks -d development -o yaml
+
+Retrieve all the workflows within project and domain in json format:
+
+::
+
+ flytectl get workflow -p flytesnacks -d development -o json
+
+Visualize the graph for a workflow within project and domain in dot format:
+
+::
+
+ flytectl get workflow -p flytesnacks -d development core.flyte_basics.basic_workflow.my_wf --latest -o dot
+
+Visualize the graph for a workflow within project and domain in a dot content
render: + +:: + + flytectl get workflow -p flytesnacks -d development core.flyte_basics.basic_workflow.my_wf --latest -o doturl + +Usage +` +) + +var workflowColumns = []printer.Column{ + {Header: "Version", JSONPath: "$.id.version"}, + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Inputs", JSONPath: "$.closure.compiledWorkflow.primary.template.interface.inputs.variables." + printer.DefaultFormattedDescriptionsKey + ".description"}, + {Header: "Outputs", JSONPath: "$.closure.compiledWorkflow.primary.template.interface.outputs.variables." + printer.DefaultFormattedDescriptionsKey + ".description"}, + {Header: "Created At", JSONPath: "$.closure.createdAt"}, +} + +var listWorkflowColumns = []printer.Column{ + {Header: "Version", JSONPath: "$.id.version"}, + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Created At", JSONPath: "$.closure.createdAt"}, +} + +var namedEntityColumns = []printer.Column{ + {Header: "Project", JSONPath: "$.id.project"}, + {Header: "Domain", JSONPath: "$.id.domain"}, + {Header: "Name", JSONPath: "$.id.name"}, + {Header: "Description", JSONPath: "$.metadata.description"}, + {Header: "State", JSONPath: "$.metadata.state"}, +} + +func WorkflowToProtoMessages(l []*admin.Workflow) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func NamedEntityToProtoMessages(l []*admin.NamedEntity) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func WorkflowToTableProtoMessages(l []*admin.Workflow) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + m := proto.Clone(m).(*admin.Workflow) + if m.Closure != nil && m.Closure.CompiledWorkflow != nil { + if m.Closure.CompiledWorkflow.Primary != nil { + if m.Closure.CompiledWorkflow.Primary.Template != nil { + if m.Closure.CompiledWorkflow.Primary.Template.Interface != 
nil { + if m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables != nil { + printer.FormatVariableDescriptions(m.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables) + } + if m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs != nil && m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables != nil { + printer.FormatVariableDescriptions(m.Closure.CompiledWorkflow.Primary.Template.Interface.Outputs.Variables) + } + } + } + } + } + messages = append(messages, m) + } + return messages +} + +func getWorkflowFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + adminPrinter := printer.Printer{} + var workflows []*admin.Workflow + var err error + if len(args) > 0 { + name := args[0] + var isList bool + if workflows, isList, err = FetchWorkflowForName(ctx, cmdCtx.AdminFetcherExt(), name, config.GetConfig().Project, config.GetConfig().Domain); err != nil { + return err + } + columns := workflowColumns + if isList { + columns = listWorkflowColumns + } + logger.Debugf(ctx, "Retrieved %v workflow", len(workflows)) + if config.GetConfig().MustOutputFormat() == printer.OutputFormatTABLE { + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), columns, WorkflowToTableProtoMessages(workflows)...) + } + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), columns, WorkflowToProtoMessages(workflows)...) + } + + nameEntities, err := cmdCtx.AdminFetcherExt().FetchAllWorkflows(ctx, config.GetConfig().Project, config.GetConfig().Domain, workflowconfig.DefaultConfig.Filter) + if err != nil { + return err + } + + logger.Debugf(ctx, "Retrieved %v workflows", len(nameEntities)) + return adminPrinter.Print(config.GetConfig().MustOutputFormat(), namedEntityColumns, NamedEntityToProtoMessages(nameEntities)...) +} + +// FetchWorkflowForName fetches the workflow give it name. 
+func FetchWorkflowForName(ctx context.Context, fetcher ext.AdminFetcherExtInterface, name, project, + domain string) (workflows []*admin.Workflow, isList bool, err error) { + var workflow *admin.Workflow + if workflowconfig.DefaultConfig.Latest { + if workflow, err = fetcher.FetchWorkflowLatestVersion(ctx, name, project, domain, workflowconfig.DefaultConfig.Filter); err != nil { + return nil, false, err + } + workflows = append(workflows, workflow) + } else if workflowconfig.DefaultConfig.Version != "" { + if workflow, err = fetcher.FetchWorkflowVersion(ctx, name, workflowconfig.DefaultConfig.Version, project, domain); err != nil { + return nil, false, err + } + workflows = append(workflows, workflow) + } else { + workflows, err = fetcher.FetchAllVerOfWorkflow(ctx, name, project, domain, workflowconfig.DefaultConfig.Filter) + if err != nil { + return nil, false, err + } + isList = true + } + return workflows, isList, nil +} diff --git a/flytectl/cmd/get/workflow_test.go b/flytectl/cmd/get/workflow_test.go new file mode 100644 index 0000000000..e534e373e0 --- /dev/null +++ b/flytectl/cmd/get/workflow_test.go @@ -0,0 +1,200 @@ +package get + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/pkg/filters" + "github.com/flyteorg/flytectl/pkg/printer" + + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/pkg/ext/mocks" + + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflow" +) + +var ( + argsWf []string + workflow1 *admin.Workflow + workflows []*admin.Workflow +) + +func getWorkflowSetup() { + + variableMap := map[string]*core.Variable{ + "var1": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + 
CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var1", + }, + "var2": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var2 long descriptions probably needs truncate", + }, + } + workflow1 = &admin.Workflow{ + Id: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: "workflow1", + Version: "v1", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Template: &core.WorkflowTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + }, + } + workflow2 := &admin.Workflow{ + Id: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: "workflow2", + Version: "v2", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + }, + } + workflows = []*admin.Workflow{workflow1, workflow2} + argsWf = []string{"workflow1"} + workflow.DefaultConfig.Latest = false + workflow.DefaultConfig.Version = "" + workflow.DefaultConfig.Filter = filters.DefaultFilter +} + +func TestGetWorkflowFuncWithError(t *testing.T) { + t.Run("failure fetch latest", func(t *testing.T) { + s := setup() + getWorkflowSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + workflow.DefaultConfig.Latest = true + mockFetcher.OnFetchWorkflowLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching latest version")) + _, _, err := FetchWorkflowForName(s.Ctx, mockFetcher, "workflowName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching version ", func(t *testing.T) { + s := setup() 
+ getWorkflowSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + workflow.DefaultConfig.Version = "v1" + mockFetcher.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching version")) + _, _, err := FetchWorkflowForName(s.Ctx, mockFetcher, "workflowName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching all version ", func(t *testing.T) { + s := setup() + getWorkflowSetup() + mockFetcher := new(mocks.AdminFetcherExtInterface) + mockFetcher.OnFetchAllVerOfWorkflowMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching all version")) + _, _, err := FetchWorkflowForName(s.Ctx, mockFetcher, "workflowName", projectValue, domainValue) + assert.NotNil(t, err) + }) + + t.Run("failure fetching ", func(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowSetup() + workflow.DefaultConfig.Latest = true + args := []string{"workflowName"} + s.FetcherExt.OnFetchWorkflowLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching latest version")) + err := getWorkflowFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + }) + + t.Run("fetching all workflow success", func(t *testing.T) { + s := setup() + getWorkflowSetup() + var args []string + s.FetcherExt.OnFetchAllWorkflowsMatch(mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return([]*admin.NamedEntity{}, nil) + err := getWorkflowFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + }) + + t.Run("fetching all workflow error", func(t *testing.T) { + s := setup() + getWorkflowSetup() + var args []string + s.FetcherExt.OnFetchAllWorkflowsMatch(mock.Anything, mock.Anything, + mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error fetching all workflows")) + err := getWorkflowFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) 
+ }) + +} + +func TestGetWorkflowFuncLatestWithTable(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowSetup() + workflow.DefaultConfig.Latest = true + workflow.DefaultConfig.Filter = filters.Filters{} + config.GetConfig().Output = printer.OutputFormatTABLE.String() + s.FetcherExt.OnFetchWorkflowLatestVersionMatch(s.Ctx, "workflow1", projectValue, domainValue, filters.Filters{}).Return(workflow1, nil) + err := getWorkflowFunc(s.Ctx, argsWf, s.CmdCtx) + assert.Nil(t, err) + s.TearDownAndVerify(t, ` + --------- ----------- --------------------------- --------- ---------------------- +| VERSION | NAME | INPUTS | OUTPUTS | CREATED AT | + --------- ----------- --------------------------- --------- ---------------------- +| v1 | workflow1 | var1 | | 1970-01-01T00:00:00Z | +| | | var2: var2 long descri... | | | + --------- ----------- --------------------------- --------- ---------------------- +1 rows`) +} + +func TestListWorkflowFuncWithTable(t *testing.T) { + s := testutils.SetupWithExt() + getWorkflowSetup() + workflow.DefaultConfig.Filter = filters.Filters{} + config.GetConfig().Output = printer.OutputFormatTABLE.String() + s.FetcherExt.OnFetchAllVerOfWorkflowMatch(s.Ctx, "workflow1", projectValue, domainValue, filters.Filters{}).Return(workflows, nil) + err := getWorkflowFunc(s.Ctx, argsWf, s.CmdCtx) + assert.Nil(t, err) + s.TearDownAndVerify(t, ` + --------- ----------- ---------------------- +| VERSION | NAME | CREATED AT | + --------- ----------- ---------------------- +| v1 | workflow1 | 1970-01-01T00:00:00Z | + --------- ----------- ---------------------- +| v2 | workflow2 | 1970-01-01T00:00:00Z | + --------- ----------- ---------------------- +2 rows`) +} diff --git a/flytectl/cmd/register/examples.go b/flytectl/cmd/register/examples.go new file mode 100644 index 0000000000..fdccad08d5 --- /dev/null +++ b/flytectl/cmd/register/examples.go @@ -0,0 +1,64 @@ +package register + +import ( + "context" + "fmt" + + 
"github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flytectl/cmd/config" + rconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/register" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + g "github.com/flyteorg/flytectl/pkg/github" + + "github.com/google/go-github/v42/github" +) + +const ( + registerExampleShort = "Registers Flytesnacks example." + registerExampleLong = ` +Register all the latest Flytesnacks examples: +:: + + flytectl register examples -d development -p flytesnacks + +Register specific release of Flytesnacks examples: +:: + + flytectl register examples -d development -p flytesnacks --version v0.2.176 + +.. note:: + The register command automatically override the version with release version. + +Usage +` +) + +var ( + flytesnacks = "flytesnacks" +) + +func registerExamplesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + var examples []*github.ReleaseAsset + + // Deprecated checks for --k8Service + deprecatedCheck(ctx, &rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.K8ServiceAccount) + + ghRepo := g.GetGHRepoService() + examples, tag, err := getAllExample(flytesnacks, rconfig.DefaultFilesConfig.Version, ghRepo) + if err != nil { + return err + } + + logger.Infof(ctx, "Register started for %s %s release https://github.com/flyteorg/%s/releases/tag/%s", flytesnacks, tag, flytesnacks, tag) + rconfig.DefaultFilesConfig.Archive = true + rconfig.DefaultFilesConfig.Version = *tag.TagName + for _, v := range examples { + args := []string{ + *v.BrowserDownloadURL, + } + if err := Register(ctx, args, config.GetConfig(), cmdCtx); err != nil { + return fmt.Errorf("example %v failed to register %v", v.Name, err) + } + } + return nil +} diff --git a/flytectl/cmd/register/examples_test.go b/flytectl/cmd/register/examples_test.go new file mode 100644 index 0000000000..fc3996f185 --- /dev/null +++ b/flytectl/cmd/register/examples_test.go @@ -0,0 +1,25 @@ +package register + +import ( 
+ "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRegisterExamplesFunc(t *testing.T) { + s := setup() + registerFilesSetup() + args := []string{""} + err := registerExamplesFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) +} +func TestRegisterExamplesFuncErr(t *testing.T) { + s := setup() + registerFilesSetup() + flytesnacks = "testingsnacks" + args := []string{""} + err := registerExamplesFunc(s.Ctx, args, s.CmdCtx) + // TODO (Yuvraj) make test to success after fixing flytesnacks bug + assert.NotNil(t, err) + flytesnacks = "flytesnacks" +} diff --git a/flytectl/cmd/register/files.go b/flytectl/cmd/register/files.go new file mode 100644 index 0000000000..bc2049902e --- /dev/null +++ b/flytectl/cmd/register/files.go @@ -0,0 +1,173 @@ +package register + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/flyteorg/flytectl/cmd/config" + + "github.com/flyteorg/flyte/flytestdlib/storage" + + "github.com/flyteorg/flyte/flytestdlib/logger" + rconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/register" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/printer" +) + +const ( + registerFilesShort = "Registers file resources." + registerFilesLong = ` +Registers all the serialized protobuf files including tasks, workflows and launch plans with default v1 version. + +If previously registered entities with v1 version are present, the command will fail immediately on the first such encounter. +:: + + flytectl register file _pb_output/* -d development -p flytesnacks + +As per Flytectl, registration and fast registration mean the same! + +In fast registration, the input provided by the user is fast serialized proto generated by pyflyte. +When the user runs pyflyte with --fast flag, then pyflyte creates serialized proto and the source code archive file in the same directory. +Flytectl finds the input file by searching for an archive file whose name starts with "fast" and has .tar.gz extension. 
+If Flytectl finds any source code in users' input, it considers the registration as fast registration. + +SourceUploadPath is an optional flag. By default, Flytectl will create SourceUploadPath from your storage config. +If s3, Flytectl will upload the code base to s3://{{DEFINE_BUCKET_IN_STORAGE_CONFIG}}/fast/{{VERSION}}-fast{{MD5_CREATED_BY_PYFLYTE}.tar.gz}. +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --version v2 + +In case of fast registration, if the SourceUploadPath flag is defined, Flytectl will not use the default directory to upload the source code. +Instead, it will override the destination path on the registration. +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --version v2 --SourceUploadPath="s3://dummy/fast" + +To register a .tgz or .tar file, use the --archive flag. They can be local or remote files served through http/https. + +:: + + flytectl register files http://localhost:8080/_pb_output.tar -d development -p flytesnacks --archive + +Using local tgz file: + +:: + + flytectl register files _pb_output.tgz -d development -p flytesnacks --archive + +If you wish to continue executing registration on other files by ignoring the errors including the version conflicts, then send the continueOnError flag: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError + +Using short format of continueOnError flag: +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError + +Override the default version v1 using version string: +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --version v2 + +Changing the o/p format has no effect on the registration. 
The O/p is currently available only in table format: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError -o yaml + +Override IamRole during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --assumableIamRole "arn:aws:iam::123456789:role/dummy" + +Override Kubernetes service account during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --k8sServiceAccount "kubernetes-service-account" + +Override Output location prefix during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --outputLocationPrefix "s3://dummy/prefix" + +Override Destination dir of source code in container during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --destinationDirectory "/root" + +Enable schedule for the launchplans part of the serialized protobuf files: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --version v2 --enableSchedule + +Usage +` + sourceCodeExtension = ".tar.gz" +) + +func registerFromFilesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + return Register(ctx, args, config.GetConfig(), cmdCtx) +} + +func Register(ctx context.Context, args []string, cfg *config.Config, cmdCtx cmdCore.CommandContext) error { + var regErr error + var dataRefs []string + + // Deprecated checks for --k8Service + deprecatedCheck(ctx, &rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.K8ServiceAccount) + + // getSerializeOutputFiles will return you all proto and source code compress file in sorted order + dataRefs, tmpDir, err := GetSerializeOutputFiles(ctx, args, rconfig.DefaultFilesConfig.Archive) + if err != nil { + logger.Errorf(ctx, "error while un-archiving files in tmp dir due to %v", 
err) + return err + } + logger.Infof(ctx, "Parsing file... Total(%v)", len(dataRefs)) + + // It will segregate serialize output files in valid proto,Invalid files if have any and source code(In case of fast serialize input files) + sourceCodePath, validProto, InvalidFiles := segregateSourceAndProtos(dataRefs) + + // If any invalid files provide in input then through an error + if len(InvalidFiles) > 0 { + return fmt.Errorf("input package have some invalid files. try to run pyflyte package again %v", InvalidFiles) + } + + // In case of fast serialize input upload source code to destination bucket + var uploadLocation storage.DataReference + if len(sourceCodePath) > 0 { + logger.Infof(ctx, "Fast Registration detected") + uploadLocation, err = uploadFastRegisterArtifact(ctx, cfg.Project, cfg.Domain, sourceCodePath, rconfig.DefaultFilesConfig.Version, + cmdCtx.ClientSet().DataProxyClient(), rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath) + if err != nil { + return fmt.Errorf("failed to upload source code from [%v]. 
Error: %w", sourceCodePath, err) + } + + logger.Infof(ctx, "DeprecatedSource code successfully uploaded to [%v]", uploadLocation) + } + + var registerResults []Result + fastFail := !rconfig.DefaultFilesConfig.ContinueOnError + for i := 0; i < len(validProto) && !(fastFail && regErr != nil); i++ { + registerResults, regErr = registerFile(ctx, validProto[i], registerResults, cmdCtx, uploadLocation, *rconfig.DefaultFilesConfig) + } + + payload, _ := json.Marshal(registerResults) + registerPrinter := printer.Printer{} + _ = registerPrinter.JSONToTable(os.Stdout, payload, projectColumns) + if tmpDir != "" { + if _err := os.RemoveAll(tmpDir); _err != nil { + logger.Errorf(ctx, "unable to delete temp dir %v due to %v", tmpDir, _err) + return _err + } + } + return regErr +} diff --git a/flytectl/cmd/register/files_test.go b/flytectl/cmd/register/files_test.go new file mode 100644 index 0000000000..129d4f49c1 --- /dev/null +++ b/flytectl/cmd/register/files_test.go @@ -0,0 +1,221 @@ +package register + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" + + "github.com/flyteorg/flyte/flytestdlib/contextutils" + "github.com/flyteorg/flyte/flytestdlib/promutils/labeled" + "github.com/flyteorg/flyte/flytestdlib/storage" + "github.com/flyteorg/flytectl/cmd/config" + + "github.com/flyteorg/flyte/flytestdlib/promutils" + rconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/register" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + s3Output = "s3://dummy/prefix" +) + +func TestRegisterFromFiles(t *testing.T) { + t.Run("Valid registration", func(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/valid-parent-folder-register.tar"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + 
s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnUpdateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("Valid fast registration", func(t *testing.T) { + s := setup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = s3Output + mockStorage, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = mockStorage + + args := []string{"testdata/flytesnacks-core.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnUpdateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + mockDataProxy := s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient) + mockDataProxy.OnCreateUploadLocationMatch(s.Ctx, mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("Register a workflow with a failure node", func(t *testing.T) { + s := setup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + 
rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = s3Output + mockStorage, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = mockStorage + + args := []string{"testdata/failure-node.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnUpdateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + mockDataProxy := s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient) + mockDataProxy.OnCreateUploadLocationMatch(s.Ctx, mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("Failed fast registration while uploading the codebase", func(t *testing.T) { + s := setup() + registerFilesSetup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + rconfig.DefaultFilesConfig.Archive = true + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = store + args := []string{"testdata/flytesnacks-core.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnUpdateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(mock.Anything, 
mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + err = Register(s.Ctx, args, config.GetConfig(), s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("Failed registration because of invalid files", func(t *testing.T) { + s := setup() + registerFilesSetup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + rconfig.DefaultFilesConfig.Archive = true + rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = "" + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + Client = store + assert.Nil(t, err) + args := []string{"testdata/invalid-fast.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnUpdateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + }) + t.Run("Failure registration of fast serialize", func(t *testing.T) { + s := setup() + registerFilesSetup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + rconfig.DefaultFilesConfig.Archive = true + + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = s3Output + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + Client = store + assert.Nil(t, err) + args := []string{"testdata/flytesnacks-core.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")).Call.Times(1) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, 
fmt.Errorf("failed")).Call.Times(1) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")).Call.Times(1) + s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(mock.Anything, mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed"), err) + }) + t.Run("Failure registration of fast serialize continue on error", func(t *testing.T) { + s := setup() + registerFilesSetup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + rconfig.DefaultFilesConfig.Archive = true + + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = s3Output + rconfig.DefaultFilesConfig.ContinueOnError = true + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + Client = store + assert.Nil(t, err) + args := []string{"testdata/flytesnacks-core.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")).Call.Times(39) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")).Call.Times(21) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")).Call.Times(24) + s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(mock.Anything, mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed"), err) + }) + t.Run("Valid registration of fast serialize", func(t *testing.T) { + s := setup() + registerFilesSetup() + testScope := promutils.NewTestScope() + 
labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + rconfig.DefaultFilesConfig.Archive = true + + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = s3Output + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + Client = store + assert.Nil(t, err) + args := []string{"testdata/flytesnacks-core.tgz"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnUpdateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(mock.Anything, mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + }) + + t.Run("Registration with proto files ", func(t *testing.T) { + s := setup() + registerFilesSetup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + rconfig.DefaultFilesConfig.Archive = false + rconfig.DefaultFilesConfig.OutputLocationPrefix = s3Output + rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath = "" + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + Client = store + assert.Nil(t, err) + args := []string{"testdata/69_core.flyte_basics.lp.greet_1.pb"} + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateWorkflowMatch(mock.Anything, mock.Anything).Return(nil, nil) + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + 
s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(mock.Anything, mock.Anything).Return(&service.CreateUploadLocationResponse{}, nil) + err = registerFromFilesFunc(s.Ctx, args, s.CmdCtx) + assert.Nil(t, err) + }) +} diff --git a/flytectl/cmd/register/register.go b/flytectl/cmd/register/register.go new file mode 100644 index 0000000000..7caa1e9bbd --- /dev/null +++ b/flytectl/cmd/register/register.go @@ -0,0 +1,36 @@ +package register + +import ( + rconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/register" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using sphinx. +const ( + registerCmdShort = "Registers tasks, workflows, and launch plans from a list of generated serialized files." + registerCmdLong = ` +Take input files as serialized versions of the tasks/workflows/launchplans and register them with FlyteAdmin. +Currently, these input files are protobuf files generated as output from Flytekit serialize. +Project and Domain are mandatory fields to be passed for registration and an optional version which defaults to v1. +If the entities are already registered with Flyte for the same version, the registration would fail. 
+` +) + +// RemoteRegisterCommand will return register command +func RemoteRegisterCommand() *cobra.Command { + registerCmd := &cobra.Command{ + Use: "register", + Short: registerCmdShort, + Long: registerCmdLong, + } + registerResourcesFuncs := map[string]cmdcore.CommandEntry{ + "files": {CmdFunc: registerFromFilesFunc, Aliases: []string{"file"}, PFlagProvider: rconfig.DefaultFilesConfig, + Short: registerFilesShort, Long: registerFilesLong}, + "examples": {CmdFunc: registerExamplesFunc, Aliases: []string{"example", "flytesnack", "flytesnacks"}, PFlagProvider: rconfig.DefaultFilesConfig, + Short: registerExampleShort, Long: registerExampleLong}, + } + cmdcore.AddCommands(registerCmd, registerResourcesFuncs) + return registerCmd +} diff --git a/flytectl/cmd/register/register_test.go b/flytectl/cmd/register/register_test.go new file mode 100644 index 0000000000..26a3186344 --- /dev/null +++ b/flytectl/cmd/register/register_test.go @@ -0,0 +1,38 @@ +package register + +import ( + "fmt" + "net/http" + "sort" + "testing" + + u "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/stretchr/testify/assert" +) + +var ( + GetDoFunc func(req *http.Request) (*http.Response, error) +) + +var setup = u.Setup + +func TestRegisterCommand(t *testing.T) { + registerCommand := RemoteRegisterCommand() + assert.Equal(t, registerCommand.Use, "register") + assert.Equal(t, registerCommand.Short, "Registers tasks, workflows, and launch plans from a list of generated serialized files.") + fmt.Println(registerCommand.Commands()) + assert.Equal(t, len(registerCommand.Commands()), 2) + cmdNouns := registerCommand.Commands() + // Sort by Use value. 
+ sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + + assert.Equal(t, cmdNouns[0].Use, "examples") + assert.Equal(t, cmdNouns[0].Aliases, []string{"example", "flytesnack", "flytesnacks"}) + assert.Equal(t, cmdNouns[0].Short, "Registers Flytesnacks example.") + + assert.Equal(t, cmdNouns[1].Use, "files") + assert.Equal(t, cmdNouns[1].Aliases, []string{"file"}) + assert.Equal(t, cmdNouns[1].Short, "Registers file resources.") +} diff --git a/flytectl/cmd/register/register_util.go b/flytectl/cmd/register/register_util.go new file mode 100644 index 0000000000..5234e86697 --- /dev/null +++ b/flytectl/cmd/register/register_util.go @@ -0,0 +1,931 @@ +package register + +import ( + "archive/tar" + "compress/gzip" + "context" + "crypto/md5" //#nosec + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + errors2 "github.com/flyteorg/flyte/flytestdlib/errors" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" + + g "github.com/flyteorg/flytectl/pkg/github" + + "github.com/flyteorg/flyte/flytestdlib/contextutils" + "github.com/flyteorg/flyte/flytestdlib/promutils" + "github.com/flyteorg/flyte/flytestdlib/promutils/labeled" + "github.com/flyteorg/flyte/flytestdlib/utils" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flyte/flytestdlib/storage" + "github.com/flyteorg/flytectl/cmd/config" + rconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/register" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/printer" + "github.com/google/go-github/v42/github" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + v1 "k8s.io/api/core/v1" +) + +// Variable 
define in serialized proto that needs to be replace in registration time +const registrationProjectPattern = "{{ registration.project }}" +const registrationDomainPattern = "{{ registration.domain }}" +const registrationVersionPattern = "{{ registration.version }}" + +// Additional variable define in fast serialized proto that needs to be replace in registration time +const registrationRemotePackagePattern = "{{ .remote_package_path }}" +const registrationDestDirPattern = "{{ .dest_dir }}" + +// All supported extensions for compress +var supportedExtensions = []string{".tar", ".tgz", ".tar.gz"} + +// All supported extensions for gzip compress +var validGzipExtensions = []string{".tgz", ".tar.gz"} + +type SignedURLPatternMatcher = *regexp.Regexp + +var ( + SignedURLPattern SignedURLPatternMatcher = regexp.MustCompile(`https://((storage\.googleapis\.com/(?P[^/]+))|((?P[^\.]+)\.s3\.amazonaws\.com)|(.*\.blob\.core\.windows\.net/(?P[^/]+)))/(?P[^?]*)`) +) + +type Result struct { + Name string + Status string + Info string +} + +// HTTPClient interface +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +var Client *storage.DataStore + +var httpClient HTTPClient + +func init() { + httpClient = &http.Client{} +} + +var projectColumns = []printer.Column{ + {Header: "Name", JSONPath: "$.Name"}, + {Header: "Status", JSONPath: "$.Status"}, + {Header: "Additional Info", JSONPath: "$.Info"}, +} + +// Regex to match file name like xxx_1.pb, xxx_2.pb, or xxx_3.pb, and the subgroup catches the number 1, 2 or 3 +// This is used to match proto files created by pyflyte, where xxx_1.pb is a task spec, xxx_2.pb is a workflow spec, and xxx_3.pb is launch plan +var fnameRegex = regexp.MustCompile(`^.*_(?P[1-3])\.pb$`) + +type unMarshalFunc = func(ctx context.Context, fileContents []byte, fname string, errCollection errors2.ErrorCollection) (proto.Message, error) + +// Order matters here +var unMarshalFuncs = []unMarshalFunc{ + unMarshalTask, + 
unMarshalWorkflow, + unMarshalLaunchPlan, +} + +func UnMarshalContents(ctx context.Context, fileContents []byte, fname string) (proto.Message, error) { + errCollection := errors2.ErrorCollection{} + + for _, f := range reorderUnMarshalFuncs(fname) { + if m, err := f(ctx, fileContents, fname, errCollection); err == nil { + return m, nil + } + } + + return nil, fmt.Errorf("failed unmarshalling file %v. Errors: %w", fname, errCollection.ErrorOrDefault()) +} + +func unMarshalTask(ctx context.Context, fileContents []byte, fname string, errCollection errors2.ErrorCollection) (proto.Message, error) { + return unMarshal(ctx, fileContents, fname, errCollection, "Task", "task", &admin.TaskSpec{}) +} + +func unMarshalWorkflow(ctx context.Context, fileContents []byte, fname string, errCollection errors2.ErrorCollection) (proto.Message, error) { + return unMarshal(ctx, fileContents, fname, errCollection, "Workflow", "workflow", &admin.WorkflowSpec{}) +} + +func unMarshalLaunchPlan(ctx context.Context, fileContents []byte, fname string, errCollection errors2.ErrorCollection) (proto.Message, error) { + return unMarshal(ctx, fileContents, fname, errCollection, "Launchplan", "launch plan", &admin.LaunchPlan{}) +} + +func unMarshal(ctx context.Context, fileContents []byte, fname string, errCollection errors2.ErrorCollection, tpe string, typeAlt string, m proto.Message) (proto.Message, error) { + err := proto.Unmarshal(fileContents, m) + if err == nil { + return m, nil + } + + errCollection.Append(fmt.Errorf("as a %s type: %w", tpe, err)) + logger.Debugf(ctx, "Failed to unmarshal file %s for %v type", fname, typeAlt) + return nil, err +} + +func reorderUnMarshalFuncs(fname string) []unMarshalFunc { + if match := fnameRegex.FindStringSubmatch(fname); match != nil { + indexStr := match[fnameRegex.SubexpIndex("index")] + index, err := strconv.Atoi(indexStr) + if err != nil { + panic(fmt.Sprintf("unexpected error when coverting [%s] to int, file name [%s]", indexStr, fname)) + } + + var 
reordered []unMarshalFunc + for i, f := range unMarshalFuncs { + if i == index-1 { + reordered = append([]unMarshalFunc{f}, reordered...) + } else { + reordered = append(reordered, f) + } + } + return reordered + } + + return unMarshalFuncs +} + +func register(ctx context.Context, message proto.Message, cmdCtx cmdCore.CommandContext, dryRun, enableSchedule bool) error { + switch v := message.(type) { + case *admin.LaunchPlan: + launchPlan := message.(*admin.LaunchPlan) + if dryRun { + logger.Debugf(ctx, "skipping CreateLaunchPlan request (DryRun)") + return nil + } + _, err := cmdCtx.AdminClient().CreateLaunchPlan(ctx, + &admin.LaunchPlanCreateRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Name: launchPlan.Id.Name, + Version: launchPlan.Id.Version, + }, + Spec: launchPlan.Spec, + }) + if err != nil { + return err + } + // Activate the launchplan + if enableSchedule { + _, err = cmdCtx.AdminClient().UpdateLaunchPlan(ctx, &admin.LaunchPlanUpdateRequest{ + Id: &core.Identifier{ + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Name: launchPlan.Id.Name, + Version: launchPlan.Id.Version, + }, + State: admin.LaunchPlanState_ACTIVE, + }) + return err + } + return nil + case *admin.WorkflowSpec: + workflowSpec := message.(*admin.WorkflowSpec) + if dryRun { + logger.Debugf(ctx, "skipping CreateWorkflow request (DryRun)") + return nil + } + _, err := cmdCtx.AdminClient().CreateWorkflow(ctx, + &admin.WorkflowCreateRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Name: workflowSpec.Template.Id.Name, + Version: workflowSpec.Template.Id.Version, + }, + Spec: workflowSpec, + }) + return err + case *admin.TaskSpec: + taskSpec := message.(*admin.TaskSpec) + if dryRun { + logger.Debugf(ctx, "skipping CreateTask request (DryRun)") + return nil 
+ } + _, err := cmdCtx.AdminClient().CreateTask(ctx, + &admin.TaskCreateRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + Name: taskSpec.Template.Id.Name, + Version: taskSpec.Template.Id.Version, + }, + Spec: taskSpec, + }) + return err + default: + return fmt.Errorf("Failed registering unknown entity %v", v) + } +} + +func hydrateNode(node *core.Node, version string, force bool) error { + targetNode := node.Target + switch v := targetNode.(type) { + case *core.Node_TaskNode: + taskNodeWrapper := targetNode.(*core.Node_TaskNode) + taskNodeReference := taskNodeWrapper.TaskNode.Reference.(*core.TaskNode_ReferenceId) + hydrateIdentifier(taskNodeReference.ReferenceId, version, force) + case *core.Node_WorkflowNode: + workflowNodeWrapper := targetNode.(*core.Node_WorkflowNode) + switch workflowNodeWrapper.WorkflowNode.Reference.(type) { + case *core.WorkflowNode_SubWorkflowRef: + subWorkflowNodeReference := workflowNodeWrapper.WorkflowNode.Reference.(*core.WorkflowNode_SubWorkflowRef) + hydrateIdentifier(subWorkflowNodeReference.SubWorkflowRef, version, force) + case *core.WorkflowNode_LaunchplanRef: + launchPlanNodeReference := workflowNodeWrapper.WorkflowNode.Reference.(*core.WorkflowNode_LaunchplanRef) + hydrateIdentifier(launchPlanNodeReference.LaunchplanRef, version, force) + default: + return fmt.Errorf("unknown type %T", workflowNodeWrapper.WorkflowNode.Reference) + } + case *core.Node_BranchNode: + branchNodeWrapper := targetNode.(*core.Node_BranchNode) + if err := hydrateNode(branchNodeWrapper.BranchNode.IfElse.Case.ThenNode, version, force); err != nil { + return fmt.Errorf("failed to hydrateNode") + } + if len(branchNodeWrapper.BranchNode.IfElse.Other) > 0 { + for _, ifBlock := range branchNodeWrapper.BranchNode.IfElse.Other { + if err := hydrateNode(ifBlock.ThenNode, version, force); err != nil { + return fmt.Errorf("failed to hydrateNode") + } + } + } + 
switch branchNodeWrapper.BranchNode.IfElse.Default.(type) { + case *core.IfElseBlock_ElseNode: + elseNodeReference := branchNodeWrapper.BranchNode.IfElse.Default.(*core.IfElseBlock_ElseNode) + if err := hydrateNode(elseNodeReference.ElseNode, version, force); err != nil { + return fmt.Errorf("failed to hydrateNode") + } + + case *core.IfElseBlock_Error: + // Do nothing. + default: + return fmt.Errorf("unknown type %T", branchNodeWrapper.BranchNode.IfElse.Default) + } + case *core.Node_GateNode: + // Do nothing. + case *core.Node_ArrayNode: + if err := hydrateNode(v.ArrayNode.Node, version, force); err != nil { + return fmt.Errorf("failed to hydrateNode") + } + default: + return fmt.Errorf("unknown type %T", v) + } + return nil +} + +func hydrateIdentifier(identifier *core.Identifier, version string, force bool) { + if identifier.Project == "" || identifier.Project == registrationProjectPattern { + identifier.Project = config.GetConfig().Project + } + if identifier.Domain == "" || identifier.Domain == registrationDomainPattern { + identifier.Domain = config.GetConfig().Domain + } + if force || identifier.Version == "" || identifier.Version == registrationVersionPattern { + identifier.Version = version + } +} + +func hydrateTaskSpec(task *admin.TaskSpec, sourceUploadedLocation storage.DataReference, destinationDir string) error { + if task.Template.GetContainer() != nil { + for k := range task.Template.GetContainer().Args { + if task.Template.GetContainer().Args[k] == registrationRemotePackagePattern { + task.Template.GetContainer().Args[k] = sourceUploadedLocation.String() + } + if task.Template.GetContainer().Args[k] == registrationDestDirPattern { + task.Template.GetContainer().Args[k] = "." 
+ if len(destinationDir) > 0 { + task.Template.GetContainer().Args[k] = destinationDir + } + } + } + } else if task.Template.GetK8SPod() != nil && task.Template.GetK8SPod().PodSpec != nil { + var podSpec = v1.PodSpec{} + err := utils.UnmarshalStructToObj(task.Template.GetK8SPod().PodSpec, &podSpec) + if err != nil { + return err + } + for containerIdx, container := range podSpec.Containers { + for argIdx, arg := range container.Args { + if arg == registrationRemotePackagePattern { + podSpec.Containers[containerIdx].Args[argIdx] = sourceUploadedLocation.String() + } + if arg == registrationDestDirPattern { + podSpec.Containers[containerIdx].Args[argIdx] = "." + if len(destinationDir) > 0 { + podSpec.Containers[containerIdx].Args[argIdx] = destinationDir + } + } + } + } + podSpecStruct, err := utils.MarshalObjToStruct(podSpec) + if err != nil { + return err + } + task.Template.Target = &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + Metadata: task.Template.GetK8SPod().Metadata, + PodSpec: podSpecStruct, + }, + } + } + return nil +} + +func validateLPWithSchedule(lpSpec *admin.LaunchPlanSpec, wf *admin.Workflow) error { + schedule := lpSpec.EntityMetadata.Schedule + var scheduleRequiredParams []string + if wf != nil && wf.Closure != nil && wf.Closure.CompiledWorkflow != nil && + wf.Closure.CompiledWorkflow.Primary != nil && wf.Closure.CompiledWorkflow.Primary.Template != nil && + wf.Closure.CompiledWorkflow.Primary.Template.Interface != nil && + wf.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs != nil { + variables := wf.Closure.CompiledWorkflow.Primary.Template.Interface.Inputs.Variables + for varName := range variables { + if varName != schedule.KickoffTimeInputArg { + scheduleRequiredParams = append(scheduleRequiredParams, varName) + } + } + + } + // Either the scheduled param should have default or fixed values + var scheduleParamsWithValues []string + // Check for default values + if lpSpec.DefaultInputs != nil { + for paramName, paramValue := 
range lpSpec.DefaultInputs.Parameters { + if paramName != schedule.KickoffTimeInputArg && paramValue.GetDefault() != nil { + scheduleParamsWithValues = append(scheduleParamsWithValues, paramName) + } + } + } + // Check for fixed values + if lpSpec.FixedInputs != nil && lpSpec.FixedInputs.Literals != nil { + for fixedLiteralName := range lpSpec.FixedInputs.Literals { + scheduleParamsWithValues = append(scheduleParamsWithValues, fixedLiteralName) + } + } + + diffSet := leftDiff(scheduleRequiredParams, scheduleParamsWithValues) + if len(diffSet) > 0 { + return fmt.Errorf("param values are missing on scheduled workflow "+ + "for the following params %v. Either specify them having a default or fixed value", diffSet) + } + return nil +} + +func validateLaunchSpec(ctx context.Context, lpSpec *admin.LaunchPlanSpec, cmdCtx cmdCore.CommandContext) error { + if lpSpec == nil || lpSpec.WorkflowId == nil || lpSpec.EntityMetadata == nil || + lpSpec.EntityMetadata.Schedule == nil { + return nil + } + // Fetch the workflow spec using the identifier + workflowID := lpSpec.WorkflowId + wf, err := cmdCtx.AdminFetcherExt().FetchWorkflowVersion(ctx, workflowID.Name, workflowID.Version, + workflowID.Project, workflowID.Domain) + if err != nil { + return err + } + + return validateLPWithSchedule(lpSpec, wf) +} + +// Finds the left diff between to two string slices +// If a and b are two sets then the o/p c is defined as : +// c = a - a ^ b +// where ^ is intersection slice of a and b +// and - removes all the common elements and returns a new slice +// a= {1,2,3} +// b = {3,4,5} +// o/p c = {1,2} +func leftDiff(a, b []string) []string { + m := make(map[string]bool) + + for _, item := range a { + m[item] = true + } + + for _, item := range b { + delete(m, item) + } + // nil semantics on return + if len(m) == 0 { + return nil + } + c := make([]string, len(m)) + index := 0 + for item := range m { + c[index] = item + index++ + } + return c +} + +func 
hydrateLaunchPlanSpec(configAssumableIamRole string, configK8sServiceAccount string, configOutputLocationPrefix string, lpSpec *admin.LaunchPlanSpec) error { + assumableIamRole := len(configAssumableIamRole) > 0 + k8sServiceAcct := len(configK8sServiceAccount) > 0 + outputLocationPrefix := len(configOutputLocationPrefix) > 0 + if assumableIamRole || k8sServiceAcct { + lpSpec.AuthRole = &admin.AuthRole{ + KubernetesServiceAccount: configK8sServiceAccount, + AssumableIamRole: configAssumableIamRole, + } + lpSpec.SecurityContext = &core.SecurityContext{ + RunAs: &core.Identity{ + IamRole: configAssumableIamRole, + K8SServiceAccount: configK8sServiceAccount, + }, + } + } + if outputLocationPrefix { + lpSpec.RawOutputDataConfig = &admin.RawOutputDataConfig{ + OutputLocationPrefix: configOutputLocationPrefix, + } + } + return nil +} + +// Validate the spec before sending it to admin. +func validateSpec(ctx context.Context, message proto.Message, cmdCtx cmdCore.CommandContext) error { + switch v := message.(type) { + case *admin.LaunchPlan: + launchPlan := v + if err := validateLaunchSpec(ctx, launchPlan.Spec, cmdCtx); err != nil { + return err + } + } + return nil +} + +func hydrateSpec(message proto.Message, uploadLocation storage.DataReference, config rconfig.FilesConfig) error { + switch v := message.(type) { + case *admin.LaunchPlan: + launchPlan := message.(*admin.LaunchPlan) + hydrateIdentifier(launchPlan.Id, config.Version, config.Force) + hydrateIdentifier(launchPlan.Spec.WorkflowId, config.Version, config.Force) + if err := hydrateLaunchPlanSpec(config.AssumableIamRole, config.K8sServiceAccount, config.OutputLocationPrefix, launchPlan.Spec); err != nil { + return err + } + case *admin.WorkflowSpec: + workflowSpec := message.(*admin.WorkflowSpec) + for _, Noderef := range workflowSpec.Template.Nodes { + if err := hydrateNode(Noderef, config.Version, config.Force); err != nil { + return err + } + } + if workflowSpec.Template.GetFailureNode() != nil { + if err := 
hydrateNode(workflowSpec.Template.GetFailureNode(), config.Version, config.Force); err != nil { + return err + } + } + hydrateIdentifier(workflowSpec.Template.Id, config.Version, config.Force) + for _, subWorkflow := range workflowSpec.SubWorkflows { + for _, Noderef := range subWorkflow.Nodes { + if err := hydrateNode(Noderef, config.Version, config.Force); err != nil { + return err + } + } + if subWorkflow.GetFailureNode() != nil { + if err := hydrateNode(subWorkflow.GetFailureNode(), config.Version, config.Force); err != nil { + return err + } + } + hydrateIdentifier(subWorkflow.Id, config.Version, config.Force) + } + case *admin.TaskSpec: + taskSpec := message.(*admin.TaskSpec) + hydrateIdentifier(taskSpec.Template.Id, config.Version, config.Force) + // In case of fast serialize input proto also have on additional variable to substitute i.e destination bucket for source code + if err := hydrateTaskSpec(taskSpec, uploadLocation, config.DestinationDirectory); err != nil { + return err + } + + default: + return fmt.Errorf("unknown type %T", v) + } + return nil +} + +func DownloadFileFromHTTP(ctx context.Context, ref storage.DataReference) (io.ReadCloser, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, ref.String(), nil) + if err != nil { + return nil, err + } + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +/* +Get serialize output file list from the args list. +If the archive flag is on then download the archives to temp directory and extract it. In case of fast register it will also return the compressed source code +The o/p of this function would be sorted list of the file locations. 
+*/ +func GetSerializeOutputFiles(ctx context.Context, args []string, archive bool) ([]string, string, error) { + if !archive { + /* + * Sorting is required for non-archived case since its possible for the user to pass in a list of unordered + * serialized protobuf files , but flyte expects them to be registered in topologically sorted order that it had + * generated otherwise the registration can fail if the dependent files are not registered earlier. + */ + + finalList := make([]string, 0, len(args)) + for _, arg := range args { + matches, err := filepath.Glob(arg) + if err != nil { + return nil, "", fmt.Errorf("failed to glob [%v]. Error: %w", arg, err) + } + + finalList = append(finalList, matches...) + } + + sort.Strings(finalList) + return finalList, "", nil + } + + tempDir, err := ioutil.TempDir("/tmp", "register") + + if err != nil { + return nil, tempDir, err + } + var unarchivedFiles []string + for _, v := range args { + dataRefReaderCloser, err := getArchiveReaderCloser(ctx, v) + if err != nil { + return unarchivedFiles, tempDir, err + } + archiveReader := tar.NewReader(dataRefReaderCloser) + if unarchivedFiles, err = readAndCopyArchive(archiveReader, tempDir, unarchivedFiles); err != nil { + return unarchivedFiles, tempDir, err + } + if err = dataRefReaderCloser.Close(); err != nil { + return unarchivedFiles, tempDir, err + } + } + + /* + * Similarly in case of archived files, it possible to have an archive created in totally different order than the + * listing order of the serialized files which is required by flyte. Hence we explicitly sort here after unarchiving it. 
+ */ + sort.Strings(unarchivedFiles) + return unarchivedFiles, tempDir, nil +} + +func readAndCopyArchive(src io.Reader, tempDir string, unarchivedFiles []string) ([]string, error) { + for { + tarReader := src.(*tar.Reader) + header, err := tarReader.Next() + switch { + case err == io.EOF: + return unarchivedFiles, nil + case err != nil: + return unarchivedFiles, err + } + // Location to untar. FilePath couldnt be used here due to, + // G305: File traversal when extracting zip archive + target := tempDir + "/" + header.Name + if header.Typeflag == tar.TypeDir { + if _, err := os.Stat(target); err != nil { + if err := os.MkdirAll(target, 0755); err != nil { + return unarchivedFiles, err + } + } + } else if header.Typeflag == tar.TypeReg { + dest, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return unarchivedFiles, err + } + if _, err := io.Copy(dest, src); err != nil { + return unarchivedFiles, err + } + unarchivedFiles = append(unarchivedFiles, dest.Name()) + if err := dest.Close(); err != nil { + return unarchivedFiles, err + } + } + } +} + +func registerFile(ctx context.Context, fileName string, registerResults []Result, + cmdCtx cmdCore.CommandContext, uploadLocation storage.DataReference, config rconfig.FilesConfig) ([]Result, error) { + + var registerResult Result + var fileContents []byte + var err error + + if fileContents, err = ioutil.ReadFile(fileName); err != nil { + registerResults = append(registerResults, Result{Name: fileName, Status: "Failed", Info: fmt.Sprintf("Error reading file due to %v", err)}) + return registerResults, err + } + spec, err := UnMarshalContents(ctx, fileContents, fileName) + if err != nil { + registerResult = Result{Name: fileName, Status: "Failed", Info: fmt.Sprintf("Error unmarshalling file due to %v", err)} + registerResults = append(registerResults, registerResult) + return registerResults, err + } + + if err := hydrateSpec(spec, uploadLocation, config); err != nil { + 
registerResult = Result{Name: fileName, Status: "Failed", Info: fmt.Sprintf("Error hydrating spec due to %v", err)} + registerResults = append(registerResults, registerResult) + return registerResults, err + } + + logger.Debugf(ctx, "Hydrated spec : %v", getJSONSpec(spec)) + if err = validateSpec(ctx, spec, cmdCtx); err != nil { + registerResult = Result{Name: fileName, Status: "Failed", Info: fmt.Sprintf("Error hydrating spec due to %v", err)} + registerResults = append(registerResults, registerResult) + return registerResults, err + } + if err := register(ctx, spec, cmdCtx, config.DryRun, config.EnableSchedule); err != nil { + // If error is AlreadyExists then dont consider this to be an error but just a warning state + if grpcError := status.Code(err); grpcError == codes.AlreadyExists { + registerResult = Result{Name: fileName, Status: "Success", Info: fmt.Sprintf("%v", grpcError.String())} + err = nil + } else { + registerResult = Result{Name: fileName, Status: "Failed", Info: fmt.Sprintf("Error registering file due to %v", err)} + } + registerResults = append(registerResults, registerResult) + return registerResults, err + } + + registerResult = Result{Name: fileName, Status: "Success", Info: "Successfully registered file"} + logger.Debugf(ctx, "Successfully registered %v", fileName) + registerResults = append(registerResults, registerResult) + return registerResults, nil +} + +func getArchiveReaderCloser(ctx context.Context, ref string) (io.ReadCloser, error) { + dataRef := storage.DataReference(ref) + scheme, _, key, err := dataRef.Split() + if err != nil { + return nil, err + } + var dataRefReaderCloser io.ReadCloser + + isValid, extension := checkSupportedExtensionForCompress(key) + if !isValid { + return nil, errors.New("only .tar, .tar.gz and .tgz extension archives are supported") + } + + if scheme == "http" || scheme == "https" { + dataRefReaderCloser, err = DownloadFileFromHTTP(ctx, dataRef) + } else { + dataRefReaderCloser, err = 
os.Open(dataRef.String()) + } + if err != nil { + return nil, err + } + + for _, ext := range validGzipExtensions { + if ext == extension { + if dataRefReaderCloser, err = gzip.NewReader(dataRefReaderCloser); err != nil { + return nil, err + } + break + } + } + return dataRefReaderCloser, err +} + +func getJSONSpec(message proto.Message) string { + marshaller := jsonpb.Marshaler{ + EnumsAsInts: false, + EmitDefaults: true, + Indent: " ", + OrigName: true, + } + jsonSpec, _ := marshaller.MarshalToString(message) + return jsonSpec +} + +func filterExampleFromRelease(releases *github.RepositoryRelease) []*github.ReleaseAsset { + var assets []*github.ReleaseAsset + for _, v := range releases.Assets { + isValid, _ := checkSupportedExtensionForCompress(*v.Name) + if isValid { + assets = append(assets, v) + } + } + return assets +} + +func getAllExample(repository, version string, repoService g.GHRepoService) ([]*github.ReleaseAsset, *github.RepositoryRelease, error) { + if len(version) > 0 { + release, err := g.GetReleaseByTag(repository, version, repoService) + if err != nil { + return nil, nil, err + } + return filterExampleFromRelease(release), release, nil + } + release, err := g.GetLatestRelease(repository, repoService) + if err != nil { + return nil, nil, err + } + return filterExampleFromRelease(release), release, nil +} + +func getRemoteStoragePath(ctx context.Context, s *storage.DataStore, remoteLocation, file, identifier string) (storage.DataReference, error) { + remotePath, err := s.ConstructReference(ctx, storage.DataReference(remoteLocation), fmt.Sprintf("%v-%v", identifier, file)) + if err != nil { + return "", err + } + + return remotePath, nil +} + +func getTotalSize(reader io.Reader) (size int64, err error) { + page := make([]byte, 512) + size = 0 + + n := 0 + for n, err = reader.Read(page); n > 0 && err == nil; n, err = reader.Read(page) { + size += int64(n) + } + + if err == io.EOF { + return size + int64(n), nil + } + + return size, err +} + +func 
uploadFastRegisterArtifact(ctx context.Context, project, domain, sourceCodeFilePath, version string, + dataProxyClient service.DataProxyServiceClient, deprecatedSourceUploadPath string) (uploadLocation storage.DataReference, err error) { + + fileHandle, err := os.Open(sourceCodeFilePath) + if err != nil { + return "", err + } + + dataRefReaderCloser, err := gzip.NewReader(fileHandle) + if err != nil { + return "", err + } + + /* #nosec */ + hash := md5.New() + /* #nosec */ + size, err := io.Copy(hash, dataRefReaderCloser) + if err != nil { + return "", err + } + + _, err = fileHandle.Seek(0, 0) + if err != nil { + return "", err + } + + err = dataRefReaderCloser.Reset(fileHandle) + if err != nil { + return "", err + } + + h := hash.Sum(nil) + remotePath := storage.DataReference(deprecatedSourceUploadPath) + _, fileName := filepath.Split(sourceCodeFilePath) + resp, err := dataProxyClient.CreateUploadLocation(ctx, &service.CreateUploadLocationRequest{ + Project: project, + Domain: domain, + Filename: fileName, + ContentMd5: h, + }) + + if err != nil { + if status.Code(err) == codes.Unimplemented { + logger.Infof(ctx, "Using an older version of FlyteAdmin. Falling back to the configured storage client.") + } else { + return "", fmt.Errorf("failed to create an upload location. 
Error: %w", err) + } + } + + if resp != nil && len(resp.SignedUrl) > 0 { + return storage.DataReference(resp.NativeUrl), DirectUpload(resp.SignedUrl, h, size, dataRefReaderCloser) + } + + dataStore, err := getStorageClient(ctx) + if err != nil { + return "", err + } + + if len(deprecatedSourceUploadPath) == 0 { + remotePath, err = dataStore.ConstructReference(ctx, dataStore.GetBaseContainerFQN(ctx), "fast") + if err != nil { + return "", err + } + } + + remotePath, err = getRemoteStoragePath(ctx, dataStore, remotePath.String(), fileName, version) + if err != nil { + return "", err + } + + if err := dataStore.ComposedProtobufStore.WriteRaw(ctx, remotePath, size, storage.Options{}, dataRefReaderCloser); err != nil { + return "", err + } + + return remotePath, nil +} + +func DirectUpload(url string, contentMD5 []byte, size int64, data io.Reader) error { + req, err := http.NewRequest(http.MethodPut, url, data) + if err != nil { + return err + } + + req.ContentLength = size + req.Header.Set("Content-Length", strconv.FormatInt(size, 10)) + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(contentMD5)) + + client := &http.Client{} + res, err := client.Do(req) + if err != nil { + return err + } + + if res.StatusCode != http.StatusOK { + raw, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("received response code [%v]. Failed to read response body. Error: %w", res.StatusCode, err) + } + + return fmt.Errorf("failed uploading to [%v]. 
bad status: %s: %s", url, res.Status, string(raw)) + } + + return nil +} + +func getStorageClient(ctx context.Context) (*storage.DataStore, error) { + if Client != nil { + return Client, nil + } + testScope := promutils.NewTestScope() + // Set Keys + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + s, err := storage.NewDataStore(storage.GetConfig(), testScope.NewSubScope("flytectl")) + if err != nil { + logger.Errorf(ctx, "error while creating storage client %v", err) + return Client, err + } + Client = s + return Client, nil +} + +func isFastRegister(file string) bool { + _, f := filepath.Split(file) + // Pyflyte always archive source code with a name that start with fast and have an extension .tar.gz + if strings.HasPrefix(f, "fast") && strings.HasSuffix(f, sourceCodeExtension) { + return true + } + return false +} + +func segregateSourceAndProtos(dataRefs []string) (string, []string, []string) { + var validProto, InvalidFiles []string + var sourceCode string + for _, v := range dataRefs { + if isFastRegister(v) { + sourceCode = v + } else if strings.HasSuffix(v, ".pb") { + validProto = append(validProto, v) + } else { + InvalidFiles = append(InvalidFiles, v) + } + } + return sourceCode, validProto, InvalidFiles +} + +func deprecatedCheck(ctx context.Context, k8sServiceAccount *string, k8ServiceAccount string) { + if len(k8ServiceAccount) > 0 { + logger.Warning(ctx, "--K8ServiceAccount is deprecated, Please use --K8sServiceAccount") + *k8sServiceAccount = k8ServiceAccount + } +} + +func checkSupportedExtensionForCompress(file string) (bool, string) { + for _, extension := range supportedExtensions { + if strings.HasSuffix(file, extension) { + return true, extension + } + } + return false, "" +} diff --git a/flytectl/cmd/register/register_util_test.go b/flytectl/cmd/register/register_util_test.go new file mode 100644 index 0000000000..94dc1912cb --- /dev/null +++ b/flytectl/cmd/register/register_util_test.go @@ -0,0 
+1,1010 @@ +package register + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" + ghMocks "github.com/flyteorg/flytectl/pkg/github/mocks" + + "github.com/flyteorg/flyte/flytestdlib/utils" + + v1 "k8s.io/api/core/v1" + + "github.com/flyteorg/flyte/flytestdlib/contextutils" + "github.com/flyteorg/flyte/flytestdlib/promutils" + "github.com/flyteorg/flyte/flytestdlib/promutils/labeled" + "github.com/flyteorg/flyte/flytestdlib/storage" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + rconfig "github.com/flyteorg/flytectl/cmd/config/subcommand/register" + + "github.com/google/go-github/v42/github" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" +) + +type MockHTTPClient struct { + DoFunc func(req *http.Request) (*http.Response, error) +} + +func (m *MockHTTPClient) Do(req *http.Request) (*http.Response, error) { + return GetDoFunc(req) +} + +func registerFilesSetup() { + httpClient = &MockHTTPClient{} + validTar, err := os.Open("testdata/valid-register.tar") + if err != nil { + fmt.Printf("unexpected error: %v", err) + os.Exit(-1) + } + response := &http.Response{ + Body: validTar, + } + GetDoFunc = func(*http.Request) (*http.Response, error) { + return response, nil + } + + rconfig.DefaultFilesConfig.AssumableIamRole = "" + rconfig.DefaultFilesConfig.K8sServiceAccount = "" + rconfig.DefaultFilesConfig.OutputLocationPrefix = "" + rconfig.DefaultFilesConfig.EnableSchedule = true +} + +func TestGetSortedArchivedFileWithParentFolderList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := 
[]string{"testdata/valid-parent-folder-register.tar"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, len(fileList), 4) + assert.Equal(t, filepath.Join(tmpDir, "parentfolder", "014_recipes.core.basic.basic_workflow.t1_1.pb"), fileList[0]) + assert.Equal(t, filepath.Join(tmpDir, "parentfolder", "015_recipes.core.basic.basic_workflow.t2_1.pb"), fileList[1]) + assert.Equal(t, filepath.Join(tmpDir, "parentfolder", "016_recipes.core.basic.basic_workflow.my_wf_2.pb"), fileList[2]) + assert.Equal(t, filepath.Join(tmpDir, "parentfolder", "017_recipes.core.basic.basic_workflow.my_wf_3.pb"), fileList[3]) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.Nil(t, err) + // Clean up the temp directory. + assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedFileList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/valid-register.tar"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, len(fileList), 4) + assert.Equal(t, filepath.Join(tmpDir, "014_recipes.core.basic.basic_workflow.t1_1.pb"), fileList[0]) + assert.Equal(t, filepath.Join(tmpDir, "015_recipes.core.basic.basic_workflow.t2_1.pb"), fileList[1]) + assert.Equal(t, filepath.Join(tmpDir, "016_recipes.core.basic.basic_workflow.my_wf_2.pb"), fileList[2]) + assert.Equal(t, filepath.Join(tmpDir, "017_recipes.core.basic.basic_workflow.my_wf_3.pb"), fileList[3]) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.Nil(t, err) + // Clean up the temp directory. 
+ assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedFileUnorderedList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/valid-unordered-register.tar"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, len(fileList), 4) + assert.Equal(t, filepath.Join(tmpDir, "014_recipes.core.basic.basic_workflow.t1_1.pb"), fileList[0]) + assert.Equal(t, filepath.Join(tmpDir, "015_recipes.core.basic.basic_workflow.t2_1.pb"), fileList[1]) + assert.Equal(t, filepath.Join(tmpDir, "016_recipes.core.basic.basic_workflow.my_wf_2.pb"), fileList[2]) + assert.Equal(t, filepath.Join(tmpDir, "017_recipes.core.basic.basic_workflow.my_wf_3.pb"), fileList[3]) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.Nil(t, err) + // Clean up the temp directory. + assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedCorruptedFileList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/invalid.tar"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, len(fileList), 0) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.NotNil(t, err) + // Clean up the temp directory. 
+ assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedTgzList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/valid-register.tgz"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, len(fileList), 4) + assert.Equal(t, filepath.Join(tmpDir, "014_recipes.core.basic.basic_workflow.t1_1.pb"), fileList[0]) + assert.Equal(t, filepath.Join(tmpDir, "015_recipes.core.basic.basic_workflow.t2_1.pb"), fileList[1]) + assert.Equal(t, filepath.Join(tmpDir, "016_recipes.core.basic.basic_workflow.my_wf_2.pb"), fileList[2]) + assert.Equal(t, filepath.Join(tmpDir, "017_recipes.core.basic.basic_workflow.my_wf_3.pb"), fileList[3]) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.Nil(t, err) + // Clean up the temp directory. + assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedCorruptedTgzFileList(t *testing.T) { + s := setup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/invalid.tgz"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, 0, len(fileList)) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.NotNil(t, err) + // Clean up the temp directory. 
+ assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedInvalidArchiveFileList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"testdata/invalid-extension-register.zip"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, 0, len(fileList)) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.NotNil(t, err) + assert.Equal(t, errors.New("only .tar, .tar.gz and .tgz extension archives are supported"), err) + // Clean up the temp directory. + assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedFileThroughInvalidHttpList(t *testing.T) { + s := setup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"http://invalidhost:invalidport/testdata/valid-register.tar"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, 0, len(fileList)) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.NotNil(t, err) + // Clean up the temp directory. 
+ assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedFileThroughValidHttpList(t *testing.T) { + s := setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"http://dummyhost:80/testdata/valid-register.tar"} + fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, len(fileList), 4) + assert.Equal(t, filepath.Join(tmpDir, "014_recipes.core.basic.basic_workflow.t1_1.pb"), fileList[0]) + assert.Equal(t, filepath.Join(tmpDir, "015_recipes.core.basic.basic_workflow.t2_1.pb"), fileList[1]) + assert.Equal(t, filepath.Join(tmpDir, "016_recipes.core.basic.basic_workflow.my_wf_2.pb"), fileList[2]) + assert.Equal(t, filepath.Join(tmpDir, "017_recipes.core.basic.basic_workflow.my_wf_3.pb"), fileList[3]) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.Nil(t, err) + // Clean up the temp directory. + assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func TestGetSortedArchivedFileThroughValidHttpWithNullContextList(t *testing.T) { + setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.Archive = true + args := []string{"http://dummyhost:80/testdata/valid-register.tar"} + var ctx context.Context = nil + fileList, tmpDir, err := GetSerializeOutputFiles(ctx, args, rconfig.DefaultFilesConfig.Archive) + assert.Equal(t, 0, len(fileList)) + assert.True(t, strings.HasPrefix(tmpDir, "/tmp/register")) + assert.NotNil(t, err) + assert.Equal(t, errors.New("net/http: nil Context"), err) + // Clean up the temp directory. 
+ assert.Nil(t, os.RemoveAll(tmpDir), "unable to delete temp dir %v", tmpDir) +} + +func Test_getTotalSize(t *testing.T) { + b := bytes.NewBufferString("hello world") + size, err := getTotalSize(b) + assert.NoError(t, err) + assert.Equal(t, int64(11), size) +} + +func TestRegisterFile(t *testing.T) { + t.Run("Successful run", func(t *testing.T) { + s := setup() + registerFilesSetup() + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) + args := []string{"testdata/69_core.flyte_basics.lp.greet_1.pb"} + var registerResults []Result + results, err := registerFile(s.Ctx, args[0], registerResults, s.CmdCtx, "", *rconfig.DefaultFilesConfig) + assert.Equal(t, 1, len(results)) + assert.Nil(t, err) + }) + t.Run("Failed Scheduled launch plan registration", func(t *testing.T) { + s := setup() + registerFilesSetup() + s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) + variableMap := map[string]*core.Variable{ + "var1": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var1", + }, + "var2": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var2 long descriptions probably needs truncate", + }, + } + wf := &admin.Workflow{ + Closure: &admin.WorkflowClosure{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Template: &core.WorkflowTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + }, + } + s.FetcherExt.OnFetchWorkflowVersionMatch(s.Ctx, "core.scheduled_workflows.lp_schedules.date_formatter_wf", mock.Anything, "dummyProject", "dummyDomain").Return(wf, nil) + args := 
[]string{"testdata/152_my_cron_scheduled_lp_3.pb"} + var registerResults []Result + results, err := registerFile(s.Ctx, args[0], registerResults, s.CmdCtx, "", *rconfig.DefaultFilesConfig) + assert.Equal(t, 1, len(results)) + assert.Equal(t, "Failed", results[0].Status) + assert.Contains(t, results[0].Info, "param values are missing on scheduled workflow for the following params") + assert.NotNil(t, err) + }) + t.Run("Non existent file", func(t *testing.T) { + s := setup() + registerFilesSetup() + args := []string{"testdata/non-existent.pb"} + var registerResults []Result + results, err := registerFile(s.Ctx, args[0], registerResults, s.CmdCtx, "", *rconfig.DefaultFilesConfig) + assert.Equal(t, 1, len(results)) + assert.Equal(t, "Failed", results[0].Status) + assert.Equal(t, "Error reading file due to open testdata/non-existent.pb: no such file or directory", results[0].Info) + assert.NotNil(t, err) + }) + t.Run("unmarhal failure", func(t *testing.T) { + s := setup() + registerFilesSetup() + args := []string{"testdata/valid-register.tar"} + var registerResults []Result + results, err := registerFile(s.Ctx, args[0], registerResults, s.CmdCtx, "", *rconfig.DefaultFilesConfig) + assert.Equal(t, 1, len(results)) + assert.Equal(t, "Failed", results[0].Status) + assert.True(t, strings.HasPrefix(results[0].Info, "Error unmarshalling file due to failed unmarshalling file testdata/valid-register.tar")) + assert.NotNil(t, err) + }) + t.Run("AlreadyExists", func(t *testing.T) { + s := setup() + registerFilesSetup() + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, + status.Error(codes.AlreadyExists, "AlreadyExists")) + args := []string{"testdata/69_core.flyte_basics.lp.greet_1.pb"} + var registerResults []Result + results, err := registerFile(s.Ctx, args[0], registerResults, s.CmdCtx, "", *rconfig.DefaultFilesConfig) + assert.Equal(t, 1, len(results)) + assert.Equal(t, "Success", results[0].Status) + assert.Equal(t, "AlreadyExists", 
results[0].Info) + assert.Nil(t, err) + }) + t.Run("Registration Error", func(t *testing.T) { + s := setup() + registerFilesSetup() + s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, + status.Error(codes.InvalidArgument, "Invalid")) + args := []string{"testdata/69_core.flyte_basics.lp.greet_1.pb"} + var registerResults []Result + results, err := registerFile(s.Ctx, args[0], registerResults, s.CmdCtx, "", *rconfig.DefaultFilesConfig) + assert.Equal(t, 1, len(results)) + assert.Equal(t, "Failed", results[0].Status) + assert.Equal(t, "Error registering file due to rpc error: code = InvalidArgument desc = Invalid", results[0].Info) + assert.NotNil(t, err) + }) +} + +func TestHydrateLaunchPlanSpec(t *testing.T) { + t.Run("IamRole override", func(t *testing.T) { + setup() + registerFilesSetup() + rconfig.DefaultFilesConfig.AssumableIamRole = "iamRole" + lpSpec := &admin.LaunchPlanSpec{} + err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) + assert.Nil(t, err) + assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole"}, lpSpec.AuthRole) + assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole"}}, lpSpec.SecurityContext) + }) + t.Run("k8sService account override", func(t *testing.T) { + registerFilesSetup() + rconfig.DefaultFilesConfig.K8sServiceAccount = "k8Account" + lpSpec := &admin.LaunchPlanSpec{} + err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) + assert.Nil(t, err) + assert.Equal(t, &admin.AuthRole{KubernetesServiceAccount: "k8Account"}, lpSpec.AuthRole) + assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{K8SServiceAccount: "k8Account"}}, lpSpec.SecurityContext) + }) + t.Run("Both k8sService and IamRole", func(t *testing.T) { + registerFilesSetup() + 
rconfig.DefaultFilesConfig.AssumableIamRole = "iamRole" + rconfig.DefaultFilesConfig.K8sServiceAccount = "k8Account" + lpSpec := &admin.LaunchPlanSpec{} + err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) + assert.Nil(t, err) + assert.Equal(t, &admin.AuthRole{AssumableIamRole: "iamRole", + KubernetesServiceAccount: "k8Account"}, lpSpec.AuthRole) + assert.Equal(t, &core.SecurityContext{RunAs: &core.Identity{IamRole: "iamRole", K8SServiceAccount: "k8Account"}}, lpSpec.SecurityContext) + }) + t.Run("Output prefix", func(t *testing.T) { + registerFilesSetup() + rconfig.DefaultFilesConfig.OutputLocationPrefix = "prefix" + lpSpec := &admin.LaunchPlanSpec{} + err := hydrateLaunchPlanSpec(rconfig.DefaultFilesConfig.AssumableIamRole, rconfig.DefaultFilesConfig.K8sServiceAccount, rconfig.DefaultFilesConfig.OutputLocationPrefix, lpSpec) + assert.Nil(t, err) + assert.Equal(t, &admin.RawOutputDataConfig{OutputLocationPrefix: "prefix"}, lpSpec.RawOutputDataConfig) + }) +} + +func TestUploadFastRegisterArtifact(t *testing.T) { + t.Run("Successful upload", func(t *testing.T) { + s := setup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = store + s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(s.Ctx, &service.CreateUploadLocationRequest{ + Project: "flytesnacks", + Domain: "development", + Filename: "flytesnacks-core.tgz", + ContentMd5: []uint8{0x19, 0x72, 0x39, 0xcd, 0x85, 0x2d, 0xf1, 0x79, 0x8f, 0x6b, 0x3, 0xb3, 0xa9, 0x6c, 0xec, 0xa0}, + }).Return(&service.CreateUploadLocationResponse{}, nil) + _, err = uploadFastRegisterArtifact(s.Ctx, "flytesnacks", "development", 
"testdata/flytesnacks-core.tgz", "", s.MockClient.DataProxyClient(), rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath) + assert.Nil(t, err) + }) + t.Run("Failed upload", func(t *testing.T) { + s := setup() + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + store, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = store + s.MockClient.DataProxyClient().(*mocks.DataProxyServiceClient).OnCreateUploadLocationMatch(s.Ctx, &service.CreateUploadLocationRequest{ + Project: "flytesnacks", + Domain: "development", + Filename: "flytesnacks-core.tgz", + ContentMd5: []uint8{0x19, 0x72, 0x39, 0xcd, 0x85, 0x2d, 0xf1, 0x79, 0x8f, 0x6b, 0x3, 0xb3, 0xa9, 0x6c, 0xec, 0xa0}, + }).Return(&service.CreateUploadLocationResponse{}, nil) + _, err = uploadFastRegisterArtifact(context.Background(), "flytesnacks", "development", "testdata/flytesnacks-core.tgz", "", s.MockClient.DataProxyClient(), rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath) + assert.Nil(t, err) + }) + t.Run("Failed upload", func(t *testing.T) { + testScope := promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + s, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = s + _, err = uploadFastRegisterArtifact(context.Background(), "flytesnacks", "development", "testdata/flytesnacksre.tgz", "", nil, rconfig.DefaultFilesConfig.DeprecatedSourceUploadPath) + assert.NotNil(t, err) + }) +} + +func TestGetStorageClient(t *testing.T) { + t.Run("Failed to create storage client", func(t *testing.T) { + Client = nil + s, err := getStorageClient(context.Background()) + assert.NotNil(t, err) + assert.Nil(t, s) + }) +} + +func TestGetAllFlytesnacksExample(t *testing.T) { + t.Run("Failed to 
get manifest with wrong name", func(t *testing.T) { + mockGh := &ghMocks.GHRepoService{} + mockGh.OnGetLatestReleaseMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("failed")) + _, _, err := getAllExample("no////ne", "", mockGh) + assert.NotNil(t, err) + }) + t.Run("Failed to get release", func(t *testing.T) { + mockGh := &ghMocks.GHRepoService{} + tag := "v0.15.0" + sandboxManifest := "flyte_sandbox_manifest.tgz" + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&github.RepositoryRelease{ + TagName: &tag, + Assets: []*github.ReleaseAsset{{ + Name: &sandboxManifest, + }}, + }, nil, fmt.Errorf("failed")) + _, _, err := getAllExample("homebrew-tap", "1.0", mockGh) + assert.NotNil(t, err) + }) + t.Run("Successfully get examples", func(t *testing.T) { + mockGh := &ghMocks.GHRepoService{} + tag := "v0.15.0" + sandboxManifest := "flyte_sandbox_manifest.tgz" + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, tag).Return(&github.RepositoryRelease{ + TagName: &tag, + Assets: []*github.ReleaseAsset{{ + Name: &sandboxManifest, + }}, + }, nil, nil) + assets, r, err := getAllExample("flytesnacks", tag, mockGh) + assert.Nil(t, err) + assert.Greater(t, len(*r.TagName), 0) + assert.Greater(t, len(assets), 0) + }) +} + +func TestRegister(t *testing.T) { + t.Run("Failed to register", func(t *testing.T) { + s := setup() + registerFilesSetup() + node := &admin.NodeExecution{} + err := register(s.Ctx, node, s.CmdCtx, rconfig.DefaultFilesConfig.DryRun, rconfig.DefaultFilesConfig.EnableSchedule) + assert.NotNil(t, err) + }) +} + +func TestHydrateNode(t *testing.T) { + t.Run("Failed hydrate node", func(t *testing.T) { + registerFilesSetup() + node := &core.Node{} + err := hydrateNode(node, rconfig.DefaultFilesConfig.Version, true) + assert.NotNil(t, err) + }) + + t.Run("hydrateSpec with wrong type", func(t *testing.T) { + registerFilesSetup() + task := &admin.Task{} + err := 
hydrateSpec(task, "", *rconfig.DefaultFilesConfig) + assert.NotNil(t, err) + }) +} + +func TestHydrateArrayNode(t *testing.T) { + registerFilesSetup() + node := &core.Node{ + Target: &core.Node_ArrayNode{ + ArrayNode: &core.ArrayNode{ + Node: &core.Node{ + Target: &core.Node_TaskNode{ + TaskNode: &core.TaskNode{ + Reference: &core.TaskNode_ReferenceId{ + ReferenceId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "flytesnacks", + Domain: "development", + Name: "n1", + Version: "v1", + }, + }, + }, + }, + }, + }, + }, + } + err := hydrateNode(node, rconfig.DefaultFilesConfig.Version, true) + assert.Nil(t, err) +} + +func TestHydrateGateNode(t *testing.T) { + t.Run("Hydrate Sleep", func(t *testing.T) { + registerFilesSetup() + // Write a node that contains a GateNode + node := &core.Node{ + Target: &core.Node_GateNode{ + GateNode: &core.GateNode{ + Condition: &core.GateNode_Sleep{ + Sleep: &core.SleepCondition{ + Duration: &durationpb.Duration{ + Seconds: 10, + }, + }, + }, + }, + }, + } + err := hydrateNode(node, rconfig.DefaultFilesConfig.Version, true) + assert.Nil(t, err) + }) + + t.Run("Hydrate Signal", func(t *testing.T) { + registerFilesSetup() + // Write a node that contains a GateNode + node := &core.Node{ + Target: &core.Node_GateNode{ + GateNode: &core.GateNode{ + Condition: &core.GateNode_Signal{ + Signal: &core.SignalCondition{ + SignalId: "abc", + }, + }, + }, + }, + } + err := hydrateNode(node, rconfig.DefaultFilesConfig.Version, true) + assert.Nil(t, err) + }) + + t.Run("Hydrate Approve", func(t *testing.T) { + registerFilesSetup() + // Write a node that contains a GateNode + node := &core.Node{ + Target: &core.Node_GateNode{ + GateNode: &core.GateNode{ + Condition: &core.GateNode_Approve{ + Approve: &core.ApproveCondition{ + SignalId: "abc", + }, + }, + }, + }, + } + err := hydrateNode(node, rconfig.DefaultFilesConfig.Version, true) + assert.Nil(t, err) + }) +} + +func TestHydrateTaskSpec(t *testing.T) { + testScope := 
promutils.NewTestScope() + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) + s, err := storage.NewDataStore(&storage.Config{ + Type: storage.TypeMemory, + }, testScope.NewSubScope("flytectl")) + assert.Nil(t, err) + Client = s + + metadata := &core.K8SObjectMetadata{ + Labels: map[string]string{ + "l": "a", + }, + Annotations: map[string]string{ + "a": "b", + }, + } + + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + { + Args: []string{"foo", "bar"}, + }, + { + Args: []string{"baz", registrationRemotePackagePattern}, + }, + }, + } + podSpecStruct, err := utils.MarshalObjToStruct(podSpec) + if err != nil { + t.Fatal(err) + } + + task := &admin.TaskSpec{ + Template: &core.TaskTemplate{ + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + Metadata: metadata, + PodSpec: podSpecStruct, + }, + }, + }, + } + err = hydrateTaskSpec(task, storage.DataReference("file://somewhere"), "sourcey") + assert.NoError(t, err) + var hydratedPodSpec = v1.PodSpec{} + err = utils.UnmarshalStructToObj(task.Template.GetK8SPod().PodSpec, &hydratedPodSpec) + assert.NoError(t, err) + assert.Len(t, hydratedPodSpec.Containers[1].Args, 2) + assert.Contains(t, hydratedPodSpec.Containers[1].Args[1], "somewhere") +} + +func TestLeftDiff(t *testing.T) { + t.Run("empty slices", func(t *testing.T) { + c := leftDiff(nil, nil) + assert.Empty(t, c) + }) + t.Run("right empty slice", func(t *testing.T) { + a := []string{"1", "2", "3"} + c := leftDiff(a, nil) + sort.Strings(a) + sort.Strings(c) + assert.Equal(t, a, c) + }) + t.Run("non empty slices without intersection", func(t *testing.T) { + a := []string{"1", "2", "3"} + b := []string{"5", "6", "7"} + c := leftDiff(a, b) + sort.Strings(a) + sort.Strings(c) + assert.Equal(t, a, c) + }) + t.Run("non empty slices with some intersection", func(t *testing.T) { + a := []string{"1", "2", "3"} + b := []string{"2", "5", "7"} + c := leftDiff(a, b) + expected := []string{"1", "3"} + 
sort.Strings(expected) + sort.Strings(c) + assert.Equal(t, expected, c) + }) + + t.Run("non empty slices with full intersection same order", func(t *testing.T) { + a := []string{"1", "2", "3"} + b := []string{"1", "2", "3"} + c := leftDiff(a, b) + var expected []string + sort.Strings(c) + assert.Equal(t, expected, c) + }) + + t.Run("non empty slices with full intersection diff order", func(t *testing.T) { + a := []string{"1", "2", "3"} + b := []string{"2", "3", "1"} + c := leftDiff(a, b) + var expected []string + sort.Strings(c) + assert.Equal(t, expected, c) + }) +} + +func TestValidateLaunchSpec(t *testing.T) { + ctx := context.Background() + t.Run("nil launchplan spec", func(t *testing.T) { + s := setup() + registerFilesSetup() + err := validateLaunchSpec(ctx, nil, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("launchplan spec with nil workflow id", func(t *testing.T) { + s := setup() + registerFilesSetup() + lpSpec := &admin.LaunchPlanSpec{} + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("launchplan spec with empty metadata", func(t *testing.T) { + s := setup() + registerFilesSetup() + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + } + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("launchplan spec with metadata and empty schedule", func(t *testing.T) { + s := setup() + registerFilesSetup() + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + EntityMetadata: &admin.LaunchPlanMetadata{}, + } + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("validate spec failed to fetch workflow", func(t *testing.T) { + s := setup() + registerFilesSetup() + + 
s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + EntityMetadata: &admin.LaunchPlanMetadata{ + Schedule: &admin.Schedule{ + KickoffTimeInputArg: "kick_off_time_arg", + }, + }, + } + lp := &admin.LaunchPlan{ + Spec: lpSpec, + } + err := validateSpec(ctx, lp, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, "failed", err.Error()) + }) + t.Run("failed to fetch workflow", func(t *testing.T) { + s := setup() + registerFilesSetup() + + s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + EntityMetadata: &admin.LaunchPlanMetadata{ + Schedule: &admin.Schedule{ + KickoffTimeInputArg: "kick_off_time_arg", + }, + }, + } + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, "failed", err.Error()) + }) + t.Run("launchplan spec missing required param schedule", func(t *testing.T) { + s := setup() + registerFilesSetup() + variableMap := map[string]*core.Variable{ + "var1": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var1", + }, + "var2": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var2 long descriptions probably needs truncate", + }, + } + 
wf := &admin.Workflow{ + Closure: &admin.WorkflowClosure{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Template: &core.WorkflowTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + }, + } + s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(wf, nil) + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + EntityMetadata: &admin.LaunchPlanMetadata{ + Schedule: &admin.Schedule{ + KickoffTimeInputArg: "kick_off_time_arg", + }, + }, + } + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "param values are missing on scheduled workflow for the following params") + }) + t.Run("launchplan spec non empty schedule default param success", func(t *testing.T) { + s := setup() + registerFilesSetup() + variableMap := map[string]*core.Variable{ + "var1": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var1", + }, + "var2": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var2 long descriptions probably needs truncate", + }, + } + wf := &admin.Workflow{ + Closure: &admin.WorkflowClosure{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Template: &core.WorkflowTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + }, + } + s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, 
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(wf, nil) + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + EntityMetadata: &admin.LaunchPlanMetadata{ + Schedule: &admin.Schedule{ + KickoffTimeInputArg: "kick_off_time_arg", + }, + }, + DefaultInputs: &core.ParameterMap{ + Parameters: map[string]*core.Parameter{ + "var1": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}, + }, + }, + Behavior: &core.Parameter_Default{ + Default: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 10, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + FixedInputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "var2": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 10, + }, + }, + }, + }, + }, + }, + }, + }, + } + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.Nil(t, err) + }) + + t.Run("launchplan spec non empty schedule required param without value fail", func(t *testing.T) { + s := setup() + registerFilesSetup() + variableMap := map[string]*core.Variable{ + "var1": { + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + Description: "var1", + }, + } + wf := &admin.Workflow{ + Closure: &admin.WorkflowClosure{ + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Primary: &core.CompiledWorkflow{ + Template: &core.WorkflowTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + }, + } + 
s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(wf, nil) + lpSpec := &admin.LaunchPlanSpec{ + WorkflowId: &core.Identifier{ + Project: "projectValue", + Domain: "domainValue", + Name: "workflowNameValue", + Version: "workflowVersionValue", + }, + EntityMetadata: &admin.LaunchPlanMetadata{ + Schedule: &admin.Schedule{ + KickoffTimeInputArg: "kick_off_time_arg", + }, + }, + DefaultInputs: &core.ParameterMap{ + Parameters: map[string]*core.Parameter{ + "var1": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}, + }, + }, + }, + }, + }, + } + err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("param values are missing on scheduled workflow for the following params [var1]. Either specify them having a default or fixed value"), err) + }) +} diff --git a/flytectl/cmd/register/testdata/152_my_cron_scheduled_lp_3.pb b/flytectl/cmd/register/testdata/152_my_cron_scheduled_lp_3.pb new file mode 100644 index 0000000000..9f56c79ae1 Binary files /dev/null and b/flytectl/cmd/register/testdata/152_my_cron_scheduled_lp_3.pb differ diff --git a/flytectl/cmd/register/testdata/69_core.flyte_basics.lp.greet_1.pb b/flytectl/cmd/register/testdata/69_core.flyte_basics.lp.greet_1.pb new file mode 100644 index 0000000000..0fc9d10c80 Binary files /dev/null and b/flytectl/cmd/register/testdata/69_core.flyte_basics.lp.greet_1.pb differ diff --git a/flytectl/cmd/register/testdata/failure-node.tgz b/flytectl/cmd/register/testdata/failure-node.tgz new file mode 100644 index 0000000000..7ac63e86fe Binary files /dev/null and b/flytectl/cmd/register/testdata/failure-node.tgz differ diff --git a/flytectl/cmd/register/testdata/flyte-package.tgz b/flytectl/cmd/register/testdata/flyte-package.tgz new file mode 100644 index 0000000000..8ae02691be Binary files /dev/null and 
b/flytectl/cmd/register/testdata/flyte-package.tgz differ diff --git a/flytectl/cmd/register/testdata/flytesnacks-core.tgz b/flytectl/cmd/register/testdata/flytesnacks-core.tgz new file mode 100644 index 0000000000..4c300c4bba Binary files /dev/null and b/flytectl/cmd/register/testdata/flytesnacks-core.tgz differ diff --git a/flytectl/cmd/register/testdata/invalid-extension-register.zip b/flytectl/cmd/register/testdata/invalid-extension-register.zip new file mode 100644 index 0000000000..6dec9fb7eb --- /dev/null +++ b/flytectl/cmd/register/testdata/invalid-extension-register.zip @@ -0,0 +1 @@ +invalid extension file for register diff --git a/flytectl/cmd/register/testdata/invalid-fast.tgz b/flytectl/cmd/register/testdata/invalid-fast.tgz new file mode 100644 index 0000000000..28216a03e5 Binary files /dev/null and b/flytectl/cmd/register/testdata/invalid-fast.tgz differ diff --git a/flytectl/cmd/register/testdata/invalid.tar b/flytectl/cmd/register/testdata/invalid.tar new file mode 100644 index 0000000000..5c9d15ea1c --- /dev/null +++ b/flytectl/cmd/register/testdata/invalid.tar @@ -0,0 +1 @@ +invalid tar file diff --git a/flytectl/cmd/register/testdata/invalid.tgz b/flytectl/cmd/register/testdata/invalid.tgz new file mode 100644 index 0000000000..3f37575e6a --- /dev/null +++ b/flytectl/cmd/register/testdata/invalid.tgz @@ -0,0 +1 @@ +invalid tgz file diff --git a/flytectl/cmd/register/testdata/valid-fast-register.tgz b/flytectl/cmd/register/testdata/valid-fast-register.tgz new file mode 100644 index 0000000000..8ce7998189 Binary files /dev/null and b/flytectl/cmd/register/testdata/valid-fast-register.tgz differ diff --git a/flytectl/cmd/register/testdata/valid-parent-folder-register.tar b/flytectl/cmd/register/testdata/valid-parent-folder-register.tar new file mode 100644 index 0000000000..5d3091b6f6 Binary files /dev/null and b/flytectl/cmd/register/testdata/valid-parent-folder-register.tar differ diff --git a/flytectl/cmd/register/testdata/valid-register.tar 
b/flytectl/cmd/register/testdata/valid-register.tar new file mode 100644 index 0000000000..ecfad5102f Binary files /dev/null and b/flytectl/cmd/register/testdata/valid-register.tar differ diff --git a/flytectl/cmd/register/testdata/valid-register.tgz b/flytectl/cmd/register/testdata/valid-register.tgz new file mode 100644 index 0000000000..253f2975b0 Binary files /dev/null and b/flytectl/cmd/register/testdata/valid-register.tgz differ diff --git a/flytectl/cmd/register/testdata/valid-unordered-register.tar b/flytectl/cmd/register/testdata/valid-unordered-register.tar new file mode 100644 index 0000000000..4b845118c3 Binary files /dev/null and b/flytectl/cmd/register/testdata/valid-unordered-register.tar differ diff --git a/flytectl/cmd/root.go b/flytectl/cmd/root.go new file mode 100644 index 0000000000..b1a97bbb5c --- /dev/null +++ b/flytectl/cmd/root.go @@ -0,0 +1,162 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + stdConfig "github.com/flyteorg/flyte/flytestdlib/config" + "github.com/flyteorg/flyte/flytestdlib/config/viper" + "github.com/flyteorg/flytectl/cmd/compile" + "github.com/flyteorg/flytectl/cmd/config" + configuration "github.com/flyteorg/flytectl/cmd/configuration" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/cmd/create" + "github.com/flyteorg/flytectl/cmd/delete" + "github.com/flyteorg/flytectl/cmd/demo" + "github.com/flyteorg/flytectl/cmd/get" + "github.com/flyteorg/flytectl/cmd/register" + "github.com/flyteorg/flytectl/cmd/sandbox" + "github.com/flyteorg/flytectl/cmd/update" + "github.com/flyteorg/flytectl/cmd/upgrade" + "github.com/flyteorg/flytectl/cmd/version" + f "github.com/flyteorg/flytectl/pkg/filesystemutils" + "github.com/flyteorg/flytectl/pkg/printer" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +var ( + cfgFile string + configAccessor = viper.NewAccessor(stdConfig.Options{StrictMode: true}) +) + +const ( + configFileDir = ".flyte" + 
configFileName = "config.yaml" +) + +func newRootCmd() *cobra.Command { + rootCmd := &cobra.Command{ + PersistentPreRunE: initConfig, + Long: "Flytectl is a CLI tool written in Go to interact with the FlyteAdmin service.", + Short: "Flytectl CLI tool", + Use: "flytectl", + DisableAutoGenTag: true, + } + + rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file (default is $HOME/.flyte/config.yaml)") + + configAccessor.InitializePflags(rootCmd.PersistentFlags()) + + // Due to https://github.com/flyteorg/flyte/issues/341, project flag will have to be specified as + // --root.project, this adds a convenience on top to allow --project to be used + rootCmd.PersistentFlags().StringVarP(&(config.GetConfig().Project), "project", "p", "", "Specifies the Flyte project.") + rootCmd.PersistentFlags().StringVarP(&(config.GetConfig().Domain), "domain", "d", "", "Specifies the Flyte project's domain.") + rootCmd.PersistentFlags().StringVarP(&(config.GetConfig().Output), "output", "o", printer.OutputFormatTABLE.String(), fmt.Sprintf("Specifies the output type - supported formats %s. 
NOTE: dot, doturl are only supported for Workflow", printer.OutputFormats())) + rootCmd.PersistentFlags().BoolVarP(&(config.GetConfig().Interactive), "interactive", "i", false, "Set this flag to use an interactive CLI") + + rootCmd.AddCommand(get.CreateGetCommand()) + compileCmd := compile.CreateCompileCommand() + cmdCore.AddCommands(rootCmd, compileCmd) + rootCmd.AddCommand(create.RemoteCreateCommand()) + rootCmd.AddCommand(update.CreateUpdateCommand()) + rootCmd.AddCommand(register.RemoteRegisterCommand()) + rootCmd.AddCommand(delete.RemoteDeleteCommand()) + rootCmd.AddCommand(sandbox.CreateSandboxCommand()) + rootCmd.AddCommand(demo.CreateDemoCommand()) + rootCmd.AddCommand(configuration.CreateConfigCommand()) + rootCmd.AddCommand(completionCmd) + // Added version command + versionCmd := version.GetVersionCommand(rootCmd) + cmdCore.AddCommands(rootCmd, versionCmd) + + // Added upgrade command + upgradeCmd := upgrade.SelfUpgrade(rootCmd) + cmdCore.AddCommands(rootCmd, upgradeCmd) + + config.GetConfig() + + // hide global flags + rootCmd.SetUsageTemplate(`Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +`) + + return rootCmd +} + +func initConfig(cmd *cobra.Command, _ []string) error { + configFile 
:= f.FilePathJoin(f.UserHomeDir(), configFileDir, configFileName) + // TODO: Move flyteconfig env variable logic in flytestdlib + if len(os.Getenv("FLYTECTL_CONFIG")) > 0 { + configFile = os.Getenv("FLYTECTL_CONFIG") + } + + if len(cfgFile) > 0 { + configFile = cfgFile + } + + configAccessor = viper.NewAccessor(stdConfig.Options{ + StrictMode: true, + SearchPaths: []string{configFile}, + }) + + // persistent flags were initially bound to the root command so we must bind to the same command to avoid + // overriding those initial ones. We need to traverse up to the root command and initialize pflags for that. + rootCmd := cmd + for rootCmd.Parent() != nil { + rootCmd = rootCmd.Parent() + } + + configAccessor.InitializePflags(rootCmd.PersistentFlags()) + + err := configAccessor.UpdateConfig(context.TODO()) + if err != nil { + return err + } + + return nil +} + +func GenerateDocs() error { + rootCmd := newRootCmd() + err := GenReSTTree(rootCmd, "gen") + if err != nil { + logrus.Fatal(err) + return err + } + return nil +} + +func GenReSTTree(cmd *cobra.Command, dir string) error { + emptyStr := func(s string) string { return "" } + // Sphinx cross-referencing format + linkHandler := func(name, ref string) string { + return fmt.Sprintf(":doc:`%s`", ref) + } + return doc.GenReSTTreeCustom(cmd, dir, emptyStr, linkHandler) +} + +func ExecuteCmd() error { + return newRootCmd().Execute() +} diff --git a/flytectl/cmd/root_test.go b/flytectl/cmd/root_test.go new file mode 100644 index 0000000000..6941485d2c --- /dev/null +++ b/flytectl/cmd/root_test.go @@ -0,0 +1,12 @@ +package cmd + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRootCmdIntegration(t *testing.T) { + rootCmd := newRootCmd() + assert.NotNil(t, rootCmd) +} diff --git a/flytectl/cmd/sandbox/exec.go b/flytectl/cmd/sandbox/exec.go new file mode 100644 index 0000000000..0d45c235e4 --- /dev/null +++ b/flytectl/cmd/sandbox/exec.go @@ -0,0 +1,50 @@ +package sandbox + +import ( + "context" + 
"fmt" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/docker" +) + +const ( + execShort = "Executes non-interactive command inside the sandbox container" + execLong = ` +Run non-interactive commands inside the sandbox container and immediately return the output. +By default, "flytectl exec" is present in the /root directory inside the sandbox container. + +:: + + flytectl sandbox exec -- ls -al + +Usage` +) + +func sandboxClusterExec(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + if len(args) > 0 { + return execute(ctx, cli, args) + } + return fmt.Errorf("missing argument. Please check usage examples by running flytectl sandbox exec --help") +} + +func execute(ctx context.Context, cli docker.Docker, args []string) error { + c, err := docker.GetSandbox(ctx, cli) + if err != nil { + return err + } + if c != nil { + exec, err := docker.ExecCommend(ctx, cli, c.ID, args) + if err != nil { + return err + } + if err := docker.InspectExecResp(ctx, cli, exec.ID); err != nil { + return err + } + } + return nil +} diff --git a/flytectl/cmd/sandbox/exec_test.go b/flytectl/cmd/sandbox/exec_test.go new file mode 100644 index 0000000000..b86a9a781a --- /dev/null +++ b/flytectl/cmd/sandbox/exec_test.go @@ -0,0 +1,76 @@ +package sandbox + +import ( + "bufio" + "context" + "fmt" + "io" + "strings" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + admin2 "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/stretchr/testify/mock" +) + +func TestSandboxClusterExec(t *testing.T) { + mockDocker := &mocks.Docker{} + mockOutStream := new(io.Writer) + ctx := context.Background() + mockClient := 
admin2.InitializeMockClientset() + cmdCtx := cmdCore.NewCommandContext(mockClient, *mockOutStream) + reader := bufio.NewReader(strings.NewReader("test")) + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + docker.ExecConfig.Cmd = []string{"ls -al"} + mockDocker.OnContainerExecCreateMatch(ctx, mock.Anything, docker.ExecConfig).Return(types.IDResponse{}, nil) + mockDocker.OnContainerExecInspectMatch(ctx, mock.Anything).Return(types.ContainerExecInspect{}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, mock.Anything, types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: reader, + }, fmt.Errorf("Test")) + docker.Client = mockDocker + err := sandboxClusterExec(ctx, []string{"ls -al"}, cmdCtx) + + assert.NotNil(t, err) +} + +func TestSandboxClusterExecWithoutCmd(t *testing.T) { + mockDocker := &mocks.Docker{} + reader := bufio.NewReader(strings.NewReader("test")) + s := testutils.Setup() + ctx := s.Ctx + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + docker.ExecConfig.Cmd = []string{} + mockDocker.OnContainerExecCreateMatch(ctx, mock.Anything, docker.ExecConfig).Return(types.IDResponse{}, nil) + mockDocker.OnContainerExecInspectMatch(ctx, mock.Anything).Return(types.ContainerExecInspect{}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, mock.Anything, types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: reader, + }, fmt.Errorf("Test")) + docker.Client = mockDocker + err := sandboxClusterExec(ctx, []string{}, s.CmdCtx) + + assert.NotNil(t, err) +} diff --git a/flytectl/cmd/sandbox/sandbox.go b/flytectl/cmd/sandbox/sandbox.go new file mode 100644 index 0000000000..0e20df4312 --- /dev/null +++ 
b/flytectl/cmd/sandbox/sandbox.go @@ -0,0 +1,70 @@ +package sandbox + +import ( + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + cmdcore "github.com/flyteorg/flytectl/cmd/core" + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using sphinx. +const ( + sandboxShort = `Helps with sandbox interactions like start, teardown, status, and exec.` + sandboxLong = ` +Flyte Sandbox is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte sandbox as a single Docker container locally. + +To create a sandbox cluster, run: +:: + + flytectl sandbox start + +To remove a sandbox cluster, run: +:: + + flytectl sandbox teardown + +To check the status of the sandbox container, run: +:: + + flytectl sandbox status + +To execute commands inside the sandbox container, use exec: +:: + + flytectl sandbox exec -- pwd + +For just printing the docker commands for bringingup the demo container +:: + + flytectl demo start --dryRun + +` +) + +// CreateSandboxCommand will return sandbox command +func CreateSandboxCommand() *cobra.Command { + sandbox := &cobra.Command{ + Use: "sandbox", + Short: sandboxShort, + Long: sandboxLong, + } + + sandboxResourcesFuncs := map[string]cmdcore.CommandEntry{ + "start": {CmdFunc: startSandboxCluster, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: startShort, + Long: startLong, PFlagProvider: sandboxCmdConfig.DefaultConfig, DisableFlyteClient: true}, + "teardown": {CmdFunc: teardownSandboxCluster, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: teardownShort, + Long: teardownLong, DisableFlyteClient: true}, + "status": {CmdFunc: sandboxClusterStatus, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: statusShort, + Long: statusLong}, + "exec": {CmdFunc: sandboxClusterExec, Aliases: []string{}, ProjectDomainNotRequired: true, + Short: execShort, + Long: execLong, DisableFlyteClient: true}, + } 
+ + cmdcore.AddCommands(sandbox, sandboxResourcesFuncs) + + return sandbox +} diff --git a/flytectl/cmd/sandbox/sandbox_test.go b/flytectl/cmd/sandbox/sandbox_test.go new file mode 100644 index 0000000000..0692a08930 --- /dev/null +++ b/flytectl/cmd/sandbox/sandbox_test.go @@ -0,0 +1,39 @@ +package sandbox + +import ( + "fmt" + "sort" + "testing" + + "gotest.tools/assert" +) + +func TestCreateSandboxCommand(t *testing.T) { + sandboxCommand := CreateSandboxCommand() + assert.Equal(t, sandboxCommand.Use, "sandbox") + assert.Equal(t, sandboxCommand.Short, "Helps with sandbox interactions like start, teardown, status, and exec.") + fmt.Println(sandboxCommand.Commands()) + assert.Equal(t, len(sandboxCommand.Commands()), 4) + cmdNouns := sandboxCommand.Commands() + // Sort by Use value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + + assert.Equal(t, cmdNouns[0].Use, "exec") + assert.Equal(t, cmdNouns[0].Short, execShort) + assert.Equal(t, cmdNouns[0].Long, execLong) + + assert.Equal(t, cmdNouns[1].Use, "start") + assert.Equal(t, cmdNouns[1].Short, startShort) + assert.Equal(t, cmdNouns[1].Long, startLong) + + assert.Equal(t, cmdNouns[2].Use, "status") + assert.Equal(t, cmdNouns[2].Short, statusShort) + assert.Equal(t, cmdNouns[2].Long, statusLong) + + assert.Equal(t, cmdNouns[3].Use, "teardown") + assert.Equal(t, cmdNouns[3].Short, teardownShort) + assert.Equal(t, cmdNouns[3].Long, teardownLong) + +} diff --git a/flytectl/cmd/sandbox/start.go b/flytectl/cmd/sandbox/start.go new file mode 100644 index 0000000000..1996a7f17d --- /dev/null +++ b/flytectl/cmd/sandbox/start.go @@ -0,0 +1,84 @@ +package sandbox + +import ( + "context" + + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/sandbox" +) + +const ( + startShort = "Starts the Flyte sandbox cluster." 
+ startLong = ` +Flyte sandbox is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte sandbox as a single Docker container locally. + +Starts the sandbox cluster without any source code: +:: + + flytectl sandbox start + +Mounts your source code repository inside the sandbox: + +:: + + flytectl sandbox start --source=$HOME/flyteorg/flytesnacks + +Runs a specific version of Flyte. Flytectl sandbox only supports Flyte version available in the Github release, https://github.com/flyteorg/flyte/tags. + +:: + + flytectl sandbox start --version=v0.14.0 + +.. note:: + Flytectl Sandbox is only supported for Flyte versions > v0.10.0. + +Runs the latest pre release of Flyte. +:: + + flytectl sandbox start --pre + +Note: The pre release flag will be ignored if the user passes the version flag. In that case, Flytectl will use a specific version. + +Specify a Flyte Sandbox compliant image with the registry. This is useful in case you want to use an image from your registry. +:: + + flytectl sandbox start --image docker.io/my-override:latest + +Note: If image flag is passed then Flytectl will ignore version and pre flags. + +Specify a Flyte Sandbox image pull policy. Possible pull policy values are Always, IfNotPresent, or Never: +:: + + flytectl sandbox start --image docker.io/my-override:latest --imagePullPolicy Always + +Start sandbox cluster passing environment variables. This can be used to pass docker specific env variables or flyte specific env variables. +eg : for passing timeout value in secs for the sandbox container use the following. +:: + + flytectl sandbox start --env FLYTE_TIMEOUT=700 + + +The DURATION can be a positive integer or a floating-point number, followed by an optional unit suffix:: +s - seconds (default) +m - minutes +h - hours +d - days +When no unit is used, it defaults to seconds. If the duration is set to zero, the associated timeout is disabled. 
+ + +eg : for passing multiple environment variables +:: + + flytectl sandbox start --env USER=foo --env PASSWORD=bar + + +Usage +` +) + +func startSandboxCluster(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + sandboxDefaultConfig := sandboxCmdConfig.DefaultConfig + return sandbox.StartSandboxCluster(ctx, args, sandboxDefaultConfig) +} diff --git a/flytectl/cmd/sandbox/start_test.go b/flytectl/cmd/sandbox/start_test.go new file mode 100644 index 0000000000..3bee1abdbc --- /dev/null +++ b/flytectl/cmd/sandbox/start_test.go @@ -0,0 +1 @@ +package sandbox diff --git a/flytectl/cmd/sandbox/status.go b/flytectl/cmd/sandbox/status.go new file mode 100644 index 0000000000..69476a4301 --- /dev/null +++ b/flytectl/cmd/sandbox/status.go @@ -0,0 +1,32 @@ +package sandbox + +import ( + "context" + + "github.com/flyteorg/flytectl/pkg/sandbox" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/docker" +) + +const ( + statusShort = "Gets the status of the sandbox environment." + statusLong = ` +Retrieves the status of the sandbox environment. Currently, Flyte sandbox runs as a local Docker container. 
+ +Usage +:: + + flytectl sandbox status + +` +) + +func sandboxClusterStatus(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + + return sandbox.PrintStatus(ctx, cli) +} diff --git a/flytectl/cmd/sandbox/status_test.go b/flytectl/cmd/sandbox/status_test.go new file mode 100644 index 0000000000..e38cfb0271 --- /dev/null +++ b/flytectl/cmd/sandbox/status_test.go @@ -0,0 +1,39 @@ +package sandbox + +import ( + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/stretchr/testify/assert" +) + +func TestSandboxStatus(t *testing.T) { + t.Run("Sandbox status with zero result", func(t *testing.T) { + mockDocker := &mocks.Docker{} + s := testutils.Setup() + mockDocker.OnContainerList(s.Ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + docker.Client = mockDocker + err := sandboxClusterStatus(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + }) + t.Run("Sandbox status with running sandbox", func(t *testing.T) { + s := testutils.Setup() + ctx := s.Ctx + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + docker.Client = mockDocker + err := sandboxClusterStatus(ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + }) +} diff --git a/flytectl/cmd/sandbox/teardown.go b/flytectl/cmd/sandbox/teardown.go new file mode 100644 index 0000000000..4b2fcd0469 --- /dev/null +++ b/flytectl/cmd/sandbox/teardown.go @@ -0,0 +1,32 @@ +package sandbox + +import ( + "context" + + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/sandbox" + + sandboxCmdConfig 
"github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + teardownShort = "Cleans up the sandbox environment" + teardownLong = ` +Removes the Sandbox cluster and all the Flyte config created by 'sandbox start': +:: + + flytectl sandbox teardown + + +Usage +` +) + +func teardownSandboxCluster(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + cli, err := docker.GetDockerClient() + if err != nil { + return err + } + return sandbox.Teardown(ctx, cli, sandboxCmdConfig.DefaultTeardownFlags) +} diff --git a/flytectl/cmd/sandbox/teardown_test.go b/flytectl/cmd/sandbox/teardown_test.go new file mode 100644 index 0000000000..63509c22ec --- /dev/null +++ b/flytectl/cmd/sandbox/teardown_test.go @@ -0,0 +1,34 @@ +package sandbox + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/configutil" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/flyteorg/flytectl/pkg/k8s" + k8sMocks "github.com/flyteorg/flytectl/pkg/k8s/mocks" + "github.com/flyteorg/flytectl/pkg/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestTearDownClusterFunc(t *testing.T) { + var containers []types.Container + _ = util.SetupFlyteDir() + _ = util.WriteIntoFile([]byte("data"), configutil.FlytectlConfig) + s := testutils.Setup() + ctx := s.Ctx + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + mockK8sContextMgr := &k8sMocks.ContextOps{} + mockK8sContextMgr.OnRemoveContext(mock.Anything).Return(nil) + k8s.ContextMgr = mockK8sContextMgr + + docker.Client = mockDocker + err := teardownSandboxCluster(ctx, []string{}, s.CmdCtx) + assert.Nil(t, 
err) +} diff --git a/flytectl/cmd/testdata/config.yaml b/flytectl/cmd/testdata/config.yaml new file mode 100644 index 0000000000..5aa315c512 --- /dev/null +++ b/flytectl/cmd/testdata/config.yaml @@ -0,0 +1,3 @@ +admin: + endpoint: http://localhost:30082 + insecure: true diff --git a/flytectl/cmd/testdata/invalid_execution_spec.yaml b/flytectl/cmd/testdata/invalid_execution_spec.yaml new file mode 100644 index 0000000000..cc7d0c32a8 --- /dev/null +++ b/flytectl/cmd/testdata/invalid_execution_spec.yaml @@ -0,0 +1,12 @@ +iamRoleARN: "" +inputs: + numbers: + - 0 + numbers_count: 0 + run_local_at_count: 10 +kubeServiceAcct: "" +targetDomain: "" +targetProject: "" +version: v3 +workflow: core.control_flow.merge_sort.merge_sort +task: core.control_flow.merge_sort.merge diff --git a/flytectl/cmd/testdata/launchplan_execution_spec.yaml b/flytectl/cmd/testdata/launchplan_execution_spec.yaml new file mode 100644 index 0000000000..a396f67ba2 --- /dev/null +++ b/flytectl/cmd/testdata/launchplan_execution_spec.yaml @@ -0,0 +1,11 @@ +iamRoleARN: "" +inputs: + numbers: + - 0 + numbers_count: 0 + run_local_at_count: 10 +kubeServiceAcct: "" +targetDomain: "" +targetProject: "" +version: v3 +workflow: core.control_flow.merge_sort.merge_sort diff --git a/flytectl/cmd/testdata/task_execution_spec.yaml b/flytectl/cmd/testdata/task_execution_spec.yaml new file mode 100644 index 0000000000..4381c1df75 --- /dev/null +++ b/flytectl/cmd/testdata/task_execution_spec.yaml @@ -0,0 +1,15 @@ +iamRoleARN: "iamRoleARN" +inputs: + sorted_list1: + - 0 + - 2 + - 4 + sorted_list2: + - 1 + - 3 + - 5 +kubeServiceAcct: "kubeServiceAcct" +targetDomain: "development" +targetProject: "flytesnacks" +task: core.control_flow.merge_sort.merge +version: v2 diff --git a/flytectl/cmd/testdata/task_execution_spec_with_iamrole.yaml b/flytectl/cmd/testdata/task_execution_spec_with_iamrole.yaml new file mode 100644 index 0000000000..30aea6cbd1 --- /dev/null +++ 
b/flytectl/cmd/testdata/task_execution_spec_with_iamrole.yaml @@ -0,0 +1,16 @@ +iamRoleARN: "iamRoleARN" +inputs: + sorted_list1: + - 0 + - 2 + - 4 + sorted_list2: + - 1 + - 3 + - 5 +kubeServiceAcct: "" +targetDomain: "development" +targetProject: "flytesnacks" +task: core.control_flow.merge_sort.merge +version: v2 +clusterPool: gpu diff --git a/flytectl/cmd/testutils/test_utils.go b/flytectl/cmd/testutils/test_utils.go new file mode 100644 index 0000000000..28b7856666 --- /dev/null +++ b/flytectl/cmd/testutils/test_utils.go @@ -0,0 +1,156 @@ +package testutils + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "math/rand" + "os" + "regexp" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + extMocks "github.com/flyteorg/flytectl/pkg/ext/mocks" +) + +const projectValue = "dummyProject" +const domainValue = "dummyDomain" +const output = "json" + +type TestStruct struct { + Reader *os.File + Writer *os.File + Err error + Ctx context.Context + MockClient *admin.Clientset + MockAdminClient *mocks.AdminServiceClient + FetcherExt *extMocks.AdminFetcherExtInterface + UpdaterExt *extMocks.AdminUpdaterExtInterface + DeleterExt *extMocks.AdminDeleterExtInterface + MockOutStream io.Writer + CmdCtx cmdCore.CommandContext + StdOut *os.File + Stderr *os.File +} + +func Setup() (s TestStruct) { + s.Ctx = context.Background() + s.Reader, s.Writer, s.Err = os.Pipe() + if s.Err != nil { + panic(s.Err) + } + s.StdOut = os.Stdout + s.Stderr = os.Stderr + os.Stdout = s.Writer + os.Stderr = s.Writer + log.SetOutput(s.Writer) + s.MockClient = admin.InitializeMockClientset() + s.FetcherExt = new(extMocks.AdminFetcherExtInterface) + s.UpdaterExt = new(extMocks.AdminUpdaterExtInterface) + s.DeleterExt = new(extMocks.AdminDeleterExtInterface) + 
s.FetcherExt.OnAdminServiceClient().Return(s.MockClient.AdminClient()) + s.UpdaterExt.OnAdminServiceClient().Return(s.MockClient.AdminClient()) + s.DeleterExt.OnAdminServiceClient().Return(s.MockClient.AdminClient()) + s.MockAdminClient = s.MockClient.AdminClient().(*mocks.AdminServiceClient) + s.MockOutStream = s.Writer + s.CmdCtx = cmdCore.NewCommandContextWithExt(s.MockClient, s.FetcherExt, s.UpdaterExt, s.DeleterExt, s.MockOutStream) + config.GetConfig().Project = projectValue + config.GetConfig().Domain = domainValue + config.GetConfig().Output = output + + return s +} + +func SetupWithExt() (s TestStruct) { + s.Ctx = context.Background() + s.Reader, s.Writer, s.Err = os.Pipe() + if s.Err != nil { + panic(s.Err) + } + s.StdOut = os.Stdout + s.Stderr = os.Stderr + os.Stdout = s.Writer + os.Stderr = s.Writer + log.SetOutput(s.Writer) + s.MockClient = admin.InitializeMockClientset() + s.FetcherExt = new(extMocks.AdminFetcherExtInterface) + s.UpdaterExt = new(extMocks.AdminUpdaterExtInterface) + s.DeleterExt = new(extMocks.AdminDeleterExtInterface) + s.FetcherExt.OnAdminServiceClient().Return(s.MockClient.AdminClient()) + s.UpdaterExt.OnAdminServiceClient().Return(s.MockClient.AdminClient()) + s.DeleterExt.OnAdminServiceClient().Return(s.MockClient.AdminClient()) + s.MockAdminClient = s.MockClient.AdminClient().(*mocks.AdminServiceClient) + s.MockOutStream = s.Writer + s.CmdCtx = cmdCore.NewCommandContextWithExt(s.MockClient, s.FetcherExt, s.UpdaterExt, s.DeleterExt, s.MockOutStream) + config.GetConfig().Project = projectValue + config.GetConfig().Domain = domainValue + config.GetConfig().Output = output + + return s +} + +// TearDownAndVerify TODO: Change this to verify log lines from context +func (s *TestStruct) TearDownAndVerify(t *testing.T, expectedLog string) { + if err := s.Writer.Close(); err != nil { + panic(fmt.Errorf("could not close test context writer: %w", err)) + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, s.Reader); err != nil { + 
panic(fmt.Errorf("could not read from test context reader: %w", err)) + } + + assert.Equal(t, sanitizeString(expectedLog), sanitizeString(buf.String())) +} + +func (s *TestStruct) TearDownAndVerifyContains(t *testing.T, expectedLog string) { + if err := s.Writer.Close(); err != nil { + panic(fmt.Errorf("could not close test context writer: %w", err)) + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, s.Reader); err != nil { + panic(fmt.Errorf("could not read from test context reader: %w", err)) + } + + assert.Contains(t, sanitizeString(buf.String()), sanitizeString(expectedLog)) +} + +// RandomName returns a string composed of random lowercase English letters of specified length. +func RandomName(length int) string { + if length < 0 { + panic("length should be a non-negative number") + } + + var b strings.Builder + for i := 0; i < length; i++ { + c := rune('a' + rand.Intn('z'-'a')) // #nosec G404 - we use this function for testing only, do not need a cryptographically secure random number generator + b.WriteRune(c) + } + + return b.String() +} + +func sanitizeString(str string) string { + // Not the most comprehensive ANSI pattern, but this should capture common color operations + // such as \x1b[107;0m and \x1b[0m. Expand if needed (insert regex 2 problems joke here). 
+ ansiRegex := regexp.MustCompile("\u001B\\[[\\d+\\;]*\\d+m") + replacer := strings.NewReplacer( + "\n", "", + "\t", "", + ) + + str = replacer.Replace(str) + str = ansiRegex.ReplaceAllString(str, "") + str = strings.Trim(str, " ") + + return str +} diff --git a/flytectl/cmd/update/diff.go b/flytectl/cmd/update/diff.go new file mode 100644 index 0000000000..b0f9190f93 --- /dev/null +++ b/flytectl/cmd/update/diff.go @@ -0,0 +1,68 @@ +package update + +import ( + "encoding/json" + "fmt" + + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "gopkg.in/yaml.v3" +) + +const ( + diffPathBefore = "before" + diffPathAfter = "after" +) + +// DiffAsYaml marshals both objects as YAML and returns differences +// between marshalled values in unified format. Marshalling respects +// JSON field annotations. +func DiffAsYaml(path1, path2 string, object1, object2 any) (string, error) { + yaml1, err := marshalToYamlString(object1) + if err != nil { + return "", fmt.Errorf("diff as yaml: %w", err) + } + + yaml2, err := marshalToYamlString(object2) + if err != nil { + return "", fmt.Errorf("diff as yaml: %w", err) + } + + patch := diffStrings(path1, path2, yaml1, yaml2) + return patch, nil +} + +// marshalToYamlString marshals value to a YAML string, while respecting +// JSON field annotations. +func marshalToYamlString(value any) (string, error) { + jsonText, err := json.Marshal(value) + if err != nil { + return "", fmt.Errorf("marshalling object to json: %w", err) + } + + var jsonObject interface{} + if err := yaml.Unmarshal(jsonText, &jsonObject); err != nil { + return "", fmt.Errorf("unmarshalling yaml to object: %w", err) + } + + data, err := yaml.Marshal(jsonObject) + if err != nil { + return "", fmt.Errorf("marshalling object to yaml: %w", err) + } + + return string(data), nil +} + +// diffStrings returns differences between two strings in unified format. +// An empty string will be returned if both strings are equal. 
+func diffStrings(path1, path2, s1, s2 string) string { + // We add new lines at the end of each string to avoid + // "\ No newline at end of file" appended to each diff. + s1 += "\n" + s2 += "\n" + + edits := myers.ComputeEdits("", s1, s2) + diff := fmt.Sprint(gotextdiff.ToUnified(path1, path2, s1, edits)) + + return diff +} diff --git a/flytectl/cmd/update/diff_test.go b/flytectl/cmd/update/diff_test.go new file mode 100644 index 0000000000..0bb3df74c2 --- /dev/null +++ b/flytectl/cmd/update/diff_test.go @@ -0,0 +1,62 @@ +package update + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMarshalToYamlStringRespectsJsonFieldAnnotations(t *testing.T) { + type T struct { + FieldIncluded1 int `json:"fieldIncluded1"` + FieldIncluded2 string `json:"fieldIncluded2"` + FieldOmitted string `json:"fieldOmitted,omitempty"` + } + value := T{} + + result, err := marshalToYamlString(value) + + assert.Nil(t, err) + assert.Equal(t, `fieldIncluded1: 0 +fieldIncluded2: "" +`, result) +} + +func TestDiffStringsReturnsAUnifiedDiff(t *testing.T) { + s1 := "abc\ndef\nghi" + s2 := "aaa\ndef\nghi" + + patch := diffStrings("before", "after", s1, s2) + + assert.Equal(t, `--- before ++++ after +@@ -1,3 +1,3 @@ +-abc ++aaa + def + ghi +`, patch) +} + +func TestDiffAsYamlReturnsAUnifiedDiffOfObjectsMarshalledAsYAML(t *testing.T) { + type T struct { + F1 int `json:"f1"` + F2 string `json:"f2"` + F3 string `json:"f3,omitempty"` + } + object1 := T{F1: 5, F2: "apple"} + object2 := T{F1: 10, F2: "apple", F3: "banana"} + + patch, err := DiffAsYaml("before", "after", object1, object2) + + assert.Nil(t, err) + assert.Equal(t, `--- before ++++ after +@@ -1,3 +1,4 @@ +-f1: 5 ++f1: 10 + f2: apple ++f3: banana + +`, patch) +} diff --git a/flytectl/cmd/update/execution.go b/flytectl/cmd/update/execution.go new file mode 100644 index 0000000000..d70b36eddd --- /dev/null +++ b/flytectl/cmd/update/execution.go @@ -0,0 +1,100 @@ +package update + +import ( + "context" + "fmt" + 
"os" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" +) + +const ( + updateExecutionShort = "Updates the execution status" + updateExecutionLong = ` +Activate an execution; and it shows up in the CLI and UI: +:: + + flytectl update execution -p flytesnacks -d development oeh94k9r2r --activate + +Archive an execution; and it is hidden from the CLI and UI: +:: + + flytectl update execution -p flytesnacks -d development oeh94k9r2r --archive + + +Usage +` +) + +func updateExecutionFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) != 1 { + return fmt.Errorf(clierrors.ErrExecutionNotPassed) + } + executionName := args[0] + activate := execution.UConfig.Activate + archive := execution.UConfig.Archive + if activate && archive { + return fmt.Errorf(clierrors.ErrInvalidStateUpdate) + } + + var newState admin.ExecutionState + if activate { + newState = admin.ExecutionState_EXECUTION_ACTIVE + } else if archive { + newState = admin.ExecutionState_EXECUTION_ARCHIVED + } + + exec, err := cmdCtx.AdminFetcherExt().FetchExecution(ctx, executionName, project, domain) + if err != nil { + return fmt.Errorf("update execution: could not fetch execution %s: %w", executionName, err) + } + oldState := exec.GetClosure().GetStateChangeDetails().GetState() + + type Execution struct { + State admin.ExecutionState `json:"state"` + } + patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, Execution{oldState}, Execution{newState}) + if err != nil { + panic(err) + } + + if patch == "" { + fmt.Printf("No changes detected. 
Skipping the update.\n") + return nil + } + + fmt.Printf("The following changes are to be applied.\n%s\n", patch) + + if execution.UConfig.DryRun { + fmt.Printf("skipping UpdateExecution request (DryRun)\n") + return nil + } + + if !execution.UConfig.Force && !cmdUtil.AskForConfirmation("Continue?", os.Stdin) { + return fmt.Errorf("update aborted by user") + } + + _, err = cmdCtx.AdminClient().UpdateExecution(ctx, &admin.ExecutionUpdateRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Project: project, + Domain: domain, + Name: executionName, + }, + State: newState, + }) + if err != nil { + fmt.Printf(clierrors.ErrFailedExecutionUpdate, executionName, err) + return err + } + + fmt.Printf("updated execution %s successfully to state %s\n", executionName, newState) + return nil +} diff --git a/flytectl/cmd/update/execution_test.go b/flytectl/cmd/update/execution_test.go new file mode 100644 index 0000000000..e1695319b5 --- /dev/null +++ b/flytectl/cmd/update/execution_test.go @@ -0,0 +1,243 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestExecutionCanBeActivated(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateExecution", s.Ctx, + mock.MatchedBy( + func(r *admin.ExecutionUpdateRequest) bool { + 
return r.State == admin.ExecutionState_EXECUTION_ACTIVE + })) + }) +} + +func TestExecutionCanBeArchived(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ACTIVE + config.Archive = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateExecution", s.Ctx, + mock.MatchedBy( + func(r *admin.ExecutionUpdateRequest) bool { + return r.State == admin.ExecutionState_EXECUTION_ARCHIVED + })) + }) +} + +func TestExecutionCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + config.Activate = true + config.Archive = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "Specify either activate or archive") + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }) +} + +func TestExecutionUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }) +} + +func TestExecutionUpdateWithoutForceFlagFails(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED + config.Activate = true + config.Force = false 
+ }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }) +} + +func TestExecutionUpdateDoesNothingWithDryRunFlag(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED + config.Activate = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }) +} + +func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { + t.Run("without --force", func(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED + config.Activate = true + + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }) + }) + + t.Run("with --force", func(t *testing.T) { + testExecutionUpdate( + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED + config.Activate = true + + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }) + }) +} + +func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) { + testExecutionUpdateWithMockSetup( + /* mockSetup */ func(s 
*testutils.TestStruct, execution *admin.Execution) { + s.FetcherExt. + OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). + Return(nil, ext.NewNotFoundError("execution not found")) + s.MockAdminClient. + OnUpdateExecutionMatch(s.Ctx, mock.Anything). + Return(&admin.ExecutionUpdateResponse{}, nil) + }, + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }, + ) +} + +func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) { + testExecutionUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { + s.FetcherExt. + OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). + Return(execution, nil) + s.MockAdminClient. + OnUpdateExecutionMatch(s.Ctx, mock.Anything). + Return(nil, fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { + execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertCalled(t, "UpdateExecution", mock.Anything, mock.Anything) + }, + ) +} + +func TestExecutionUpdateRequiresExecutionName(t *testing.T) { + s := testutils.Setup() + err := updateExecutionFunc(s.Ctx, nil, s.CmdCtx) + + assert.ErrorContains(t, err, "execution name wasn't passed") +} + +func testExecutionUpdate( + setup func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution), + asserter func(s *testutils.TestStruct, err error), +) { + testExecutionUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { + s.FetcherExt. 
+ OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). + Return(execution, nil) + s.MockAdminClient. + OnUpdateExecutionMatch(s.Ctx, mock.Anything). + Return(&admin.ExecutionUpdateResponse{}, nil) + }, + setup, + asserter, + ) +} + +func testExecutionUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, execution *admin.Execution), + setup func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + target := newTestExecution() + + if mockSetup != nil { + mockSetup(&s, target) + } + + execution.UConfig = &execution.UpdateConfig{} + if setup != nil { + setup(&s, execution.UConfig, target) + } + + args := []string{target.Id.Name} + err := updateExecutionFunc(s.Ctx, args, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + execution.UConfig = &execution.UpdateConfig{} +} + +func newTestExecution() *admin.Execution { + return &admin.Execution{ + Id: &core.WorkflowExecutionIdentifier{ + Name: testutils.RandomName(12), + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }, + Closure: &admin.ExecutionClosure{ + StateChangeDetails: &admin.ExecutionStateChangeDetails{ + State: admin.ExecutionState_EXECUTION_ACTIVE, + }, + }, + } +} diff --git a/flytectl/cmd/update/interfaces/mocks/updater.go b/flytectl/cmd/update/interfaces/mocks/updater.go new file mode 100644 index 0000000000..c702327116 --- /dev/null +++ b/flytectl/cmd/update/interfaces/mocks/updater.go @@ -0,0 +1,50 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + cmdcore "github.com/flyteorg/flytectl/cmd/core" + + core "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + mock "github.com/stretchr/testify/mock" +) + +// Updater is an autogenerated mock type for the Updater type +type Updater struct { + mock.Mock +} + +type Updater_UpdateNamedEntity struct { + *mock.Call +} + +func (_m Updater_UpdateNamedEntity) Return(_a0 error) *Updater_UpdateNamedEntity { + return &Updater_UpdateNamedEntity{Call: _m.Call.Return(_a0)} +} + +func (_m *Updater) OnUpdateNamedEntity(ctx context.Context, name string, project string, domain string, rsType core.ResourceType, cmdCtx cmdcore.CommandContext) *Updater_UpdateNamedEntity { + c_call := _m.On("UpdateNamedEntity", ctx, name, project, domain, rsType, cmdCtx) + return &Updater_UpdateNamedEntity{Call: c_call} +} + +func (_m *Updater) OnUpdateNamedEntityMatch(matchers ...interface{}) *Updater_UpdateNamedEntity { + c_call := _m.On("UpdateNamedEntity", matchers...) 
+ return &Updater_UpdateNamedEntity{Call: c_call} +} + +// UpdateNamedEntity provides a mock function with given fields: ctx, name, project, domain, rsType, cmdCtx +func (_m *Updater) UpdateNamedEntity(ctx context.Context, name string, project string, domain string, rsType core.ResourceType, cmdCtx cmdcore.CommandContext) error { + ret := _m.Called(ctx, name, project, domain, rsType, cmdCtx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, core.ResourceType, cmdcore.CommandContext) error); ok { + r0 = rf(ctx, name, project, domain, rsType, cmdCtx) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flytectl/cmd/update/interfaces/updater.go b/flytectl/cmd/update/interfaces/updater.go new file mode 100644 index 0000000000..3aea6aec25 --- /dev/null +++ b/flytectl/cmd/update/interfaces/updater.go @@ -0,0 +1,14 @@ +package interfaces + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +//go:generate mockery -name=Updater -case=underscore + +type Updater interface { + UpdateNamedEntity(ctx context.Context, name, project, domain string, rsType core.ResourceType, cmdCtx cmdCore.CommandContext) error +} diff --git a/flytectl/cmd/update/launch_plan.go b/flytectl/cmd/update/launch_plan.go new file mode 100644 index 0000000000..6d1e7661e2 --- /dev/null +++ b/flytectl/cmd/update/launch_plan.go @@ -0,0 +1,121 @@ +package update + +import ( + "context" + "fmt" + "os" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/launchplan" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" +) + +const ( + updateLPShort = 
"Updates launch plan status" + updateLPLong = ` +Activates a ` + "`launch plan `__" + ` which activates the scheduled job associated with it: +:: + + flytectl update launchplan -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --version v1 --activate + +Deactivates a ` + "`launch plan `__" + ` which deschedules any scheduled job associated with it: +:: + + flytectl update launchplan -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --version v1 --deactivate + +Usage +` +) + +func updateLPFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) != 1 { + return fmt.Errorf(clierrors.ErrLPNotPassed) + } + name := args[0] + version := launchplan.UConfig.Version + if len(version) == 0 { + return fmt.Errorf(clierrors.ErrLPVersionNotPassed) + } + + activate := launchplan.UConfig.Activate + archive := launchplan.UConfig.Archive + + var deactivate bool + if archive { + deprecatedCommandWarning(ctx, "archive", "deactivate") + deactivate = true + } else { + deactivate = launchplan.UConfig.Deactivate + } + if activate == deactivate && deactivate { + return fmt.Errorf(clierrors.ErrInvalidBothStateUpdate) + } + + var newState admin.LaunchPlanState + if activate { + newState = admin.LaunchPlanState_ACTIVE + } else if deactivate { + newState = admin.LaunchPlanState_INACTIVE + } + + id := &core.Identifier{ + Project: project, + Domain: domain, + Name: name, + Version: version, + ResourceType: core.ResourceType_LAUNCH_PLAN, + } + + launchPlan, err := cmdCtx.AdminClient().GetLaunchPlan(ctx, &admin.ObjectGetRequest{Id: id}) + if err != nil { + return fmt.Errorf("update launch plan %s: could not fetch launch plan: %w", name, err) + } + oldState := launchPlan.GetClosure().GetState() + + type LaunchPlan struct { + State admin.LaunchPlanState `json:"state"` + } + patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, 
LaunchPlan{oldState}, LaunchPlan{newState}) + if err != nil { + panic(err) + } + + if patch == "" { + fmt.Printf("No changes detected. Skipping the update.\n") + return nil + } + + fmt.Printf("The following changes are to be applied.\n%s\n", patch) + + if launchplan.UConfig.DryRun { + fmt.Printf("skipping LaunchPlanUpdate request (DryRun)") + return nil + } + + if !launchplan.UConfig.Force && !cmdUtil.AskForConfirmation("Continue?", os.Stdin) { + return fmt.Errorf("update aborted by user") + } + + _, err = cmdCtx.AdminClient().UpdateLaunchPlan(ctx, &admin.LaunchPlanUpdateRequest{ + Id: id, + State: newState, + }) + if err != nil { + return fmt.Errorf(clierrors.ErrFailedLPUpdate, name, err) + } + + fmt.Printf("updated launch plan successfully on %s", name) + + return nil +} + +func deprecatedCommandWarning(ctx context.Context, oldCommand string, newCommand string) { + logger.Warningf(ctx, "--%v is deprecated, Please use --%v", oldCommand, newCommand) +} diff --git a/flytectl/cmd/update/launch_plan_meta.go b/flytectl/cmd/update/launch_plan_meta.go new file mode 100644 index 0000000000..b0567dc49d --- /dev/null +++ b/flytectl/cmd/update/launch_plan_meta.go @@ -0,0 +1,50 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + updateLPMetaShort = "Updates the launch plan metadata" + updateLPMetaLong = ` +Update the description on the launch plan: +:: + + flytectl update launchplan-meta -p flytesnacks -d development core.advanced.merge_sort.merge_sort --description "Mergesort example" + +Archiving launch plan named entity is not supported and would throw an error: +:: + + flytectl update launchplan-meta -p flytesnacks -d development core.advanced.merge_sort.merge_sort --archive + +Activating launch plan named entity would be a noop: +:: + + flytectl update 
launchplan-meta -p flytesnacks -d development core.advanced.merge_sort.merge_sort --activate + +Usage +` +) + +func getUpdateLPMetaFunc(namedEntityConfig *NamedEntityConfig) func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + return func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) != 1 { + return fmt.Errorf(clierrors.ErrLPNotPassed) + } + name := args[0] + err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_LAUNCH_PLAN, cmdCtx) + if err != nil { + return fmt.Errorf(clierrors.ErrFailedLPUpdate, name, err) + } + fmt.Printf("updated metadata successfully on %v", name) + return nil + } +} diff --git a/flytectl/cmd/update/launch_plan_meta_test.go b/flytectl/cmd/update/launch_plan_meta_test.go new file mode 100644 index 0000000000..c2a8d637df --- /dev/null +++ b/flytectl/cmd/update/launch_plan_meta_test.go @@ -0,0 +1,196 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestLaunchPlanMetadataCanBeActivated(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateNamedEntity", s.Ctx, + mock.MatchedBy( + func(r *admin.NamedEntityUpdateRequest) bool { + return r.GetMetadata().GetState() 
== admin.NamedEntityState_NAMED_ENTITY_ACTIVE + })) + }) +} + +func TestLaunchPlanMetadataCanBeArchived(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + config.Archive = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateNamedEntity", s.Ctx, + mock.MatchedBy( + func(r *admin.NamedEntityUpdateRequest) bool { + return r.GetMetadata().GetState() == admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + })) + }) +} + +func TestLaunchPlanMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + config.Activate = true + config.Archive = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "Specify either activate or archive") + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestLaunchPlanMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestLaunchPlanMetadataUpdateWithoutForceFlagFails(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, 
namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestLaunchPlanMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T) { + t.Run("without --force", func(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) + }) + + t.Run("with --force", func(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + 
s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) + }) +} + +func TestLaunchPlanMetadataUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { + testNamedEntityUpdateWithMockSetup( + core.ResourceType_LAUNCH_PLAN, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(nil, ext.NewNotFoundError("named entity not found")) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). + Return(&admin.NamedEntityUpdateResponse{}, nil) + }, + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }, + ) +} + +func TestLaunchPlanMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { + testNamedEntityUpdateWithMockSetup( + core.ResourceType_LAUNCH_PLAN, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(namedEntity, nil) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). 
+ Return(nil, fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }, + ) +} + +func TestLaunchPlanMetadataUpdateRequiresLaunchPlanName(t *testing.T) { + s := testutils.Setup() + config := &NamedEntityConfig{} + + err := getUpdateLPMetaFunc(config)(s.Ctx, nil, s.CmdCtx) + + assert.ErrorContains(t, err, "launch plan name wasn't passed") +} diff --git a/flytectl/cmd/update/launch_plan_test.go b/flytectl/cmd/update/launch_plan_test.go new file mode 100644 index 0000000000..4bc92ef095 --- /dev/null +++ b/flytectl/cmd/update/launch_plan_test.go @@ -0,0 +1,295 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/launchplan" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestLaunchPlanCanBeActivated(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_INACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateLaunchPlan", s.Ctx, + mock.MatchedBy( + func(r *admin.LaunchPlanUpdateRequest) bool { + return r.State == admin.LaunchPlanState_ACTIVE + 
})) + }) +} + +func TestLaunchPlanCanBeArchived(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_ACTIVE + config.Archive = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateLaunchPlan", s.Ctx, + mock.MatchedBy( + func(r *admin.LaunchPlanUpdateRequest) bool { + return r.State == admin.LaunchPlanState_INACTIVE + })) + }) +} + +func TestLaunchPlanCanBeDeactivated(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_ACTIVE + config.Deactivate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateLaunchPlan", s.Ctx, + mock.MatchedBy( + func(r *admin.LaunchPlanUpdateRequest) bool { + return r.State == admin.LaunchPlanState_INACTIVE + })) + }) +} + +func TestLaunchPlanCannotBeActivatedAndDeactivatedAtTheSameTime(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + config.Activate = true + config.Deactivate = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "Specify either activate or deactivate") + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }) +} + +func TestLaunchPlanUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_ACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s 
*testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }) +} + +func TestLaunchPlanUpdateWithoutForceFlagFails(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_INACTIVE + config.Activate = true + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }) +} + +func TestLaunchPlanUpdateDoesNothingWithDryRunFlag(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_INACTIVE + config.Activate = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }) +} + +func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { + t.Run("without --force", func(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_INACTIVE + config.Activate = true + + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }) + }) + + t.Run("with --force", func(t *testing.T) { + testLaunchPlanUpdate( + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_INACTIVE + 
config.Activate = true + + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }) + }) +} + +func TestLaunchPlanUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { + testLaunchPlanUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { + s.MockAdminClient. + OnGetLaunchPlanMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.ObjectGetRequest) bool { + return cmp.Equal(r.Id, launchplan.Id) + })). + Return(nil, ext.NewNotFoundError("launch plan not found")) + s.MockAdminClient. + OnUpdateLaunchPlanMatch(s.Ctx, mock.Anything). + Return(&admin.LaunchPlanUpdateResponse{}, nil) + }, + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }, + ) +} + +func TestLaunchPlanUpdateFailsWhenAdminClientFails(t *testing.T) { + testLaunchPlanUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { + s.MockAdminClient. + OnGetLaunchPlanMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.ObjectGetRequest) bool { + return cmp.Equal(r.Id, launchplan.Id) + })). + Return(launchplan, nil) + s.MockAdminClient. + OnUpdateLaunchPlanMatch(s.Ctx, mock.Anything). 
+ Return(nil, fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { + launchplan.Closure.State = admin.LaunchPlanState_INACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertCalled(t, "UpdateLaunchPlan", mock.Anything, mock.Anything) + }, + ) +} + +func TestLaunchPlanUpdateRequiresLaunchPlanName(t *testing.T) { + s := testutils.Setup() + launchplan.UConfig = &launchplan.UpdateConfig{} + + launchplan.UConfig.Version = testutils.RandomName(2) + err := updateLPFunc(s.Ctx, nil, s.CmdCtx) + + assert.ErrorContains(t, err, "launch plan name wasn't passed") + + // cleanup + launchplan.UConfig = &launchplan.UpdateConfig{} +} + +func TestLaunchPlanUpdateRequiresLaunchPlanVersion(t *testing.T) { + s := testutils.Setup() + launchplan.UConfig = &launchplan.UpdateConfig{} + + name := testutils.RandomName(12) + err := updateLPFunc(s.Ctx, []string{name}, s.CmdCtx) + + assert.ErrorContains(t, err, "launch plan version wasn't passed") + + // cleanup + launchplan.UConfig = &launchplan.UpdateConfig{} +} + +func testLaunchPlanUpdate( + setup func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan), + asserter func(s *testutils.TestStruct, err error), +) { + testLaunchPlanUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { + s.MockAdminClient. + OnGetLaunchPlanMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.ObjectGetRequest) bool { + return cmp.Equal(r.Id, launchplan.Id) + })). + Return(launchplan, nil) + s.MockAdminClient. + OnUpdateLaunchPlanMatch(s.Ctx, mock.Anything). 
+ Return(&admin.LaunchPlanUpdateResponse{}, nil) + }, + setup, + asserter, + ) +} + +func testLaunchPlanUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, launchplan *admin.LaunchPlan), + setup func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + target := newTestLaunchPlan() + + if mockSetup != nil { + mockSetup(&s, target) + } + + launchplan.UConfig = &launchplan.UpdateConfig{} + if setup != nil { + setup(&s, launchplan.UConfig, target) + } + + args := []string{target.Id.Name} + launchplan.UConfig.Version = target.Id.Version + err := updateLPFunc(s.Ctx, args, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + launchplan.UConfig = &launchplan.UpdateConfig{} +} + +func newTestLaunchPlan() *admin.LaunchPlan { + return &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: testutils.RandomName(12), + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + ResourceType: core.ResourceType_LAUNCH_PLAN, + Version: testutils.RandomName(2), + }, + Closure: &admin.LaunchPlanClosure{ + State: admin.LaunchPlanState_ACTIVE, + }, + } +} diff --git a/flytectl/cmd/update/matchable_attribute_util.go b/flytectl/cmd/update/matchable_attribute_util.go new file mode 100644 index 0000000000..ddb2c11091 --- /dev/null +++ b/flytectl/cmd/update/matchable_attribute_util.go @@ -0,0 +1,172 @@ +package update + +import ( + "context" + "fmt" + "os" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +func DecorateAndUpdateMatchableAttr( + ctx context.Context, + cmdCtx cmdCore.CommandContext, + project, domain, workflow string, + resourceType admin.MatchableResource, + attributeDecorator 
sconfig.MatchableAttributeDecorator, + dryRun bool, + force bool, +) error { + if project == "" { + return fmt.Errorf("project is required") + } + if domain == "" && workflow != "" { + return fmt.Errorf("domain is required") + } + + switch { + case workflow != "": + return updateWorkflowMatchableAttributes(ctx, cmdCtx, project, domain, workflow, resourceType, attributeDecorator, dryRun, force) + case domain != "": + return updateProjectDomainMatchableAttributes(ctx, cmdCtx, project, domain, resourceType, attributeDecorator, dryRun, force) + default: + return updateProjectMatchableAttributes(ctx, cmdCtx, project, resourceType, attributeDecorator, dryRun, force) + } +} + +func updateProjectMatchableAttributes( + ctx context.Context, + cmdCtx cmdCore.CommandContext, + project string, + resourceType admin.MatchableResource, + attributeDecorator sconfig.MatchableAttributeDecorator, + dryRun bool, + force bool, +) error { + if project == "" { + panic("project is empty") + } + + response, err := cmdCtx.AdminFetcherExt().FetchProjectAttributes(ctx, project, resourceType) + if err != nil && !ext.IsNotFoundError(err) { + return fmt.Errorf("update project %s matchable attributes: could not fetch attributes: %w", project, err) + } + + oldMatchingAttributes := response.GetAttributes().GetMatchingAttributes() + newMatchingAttributes := attributeDecorator.Decorate() + + if confirmed, err := confirmMatchableAttributeUpdate(oldMatchingAttributes, newMatchingAttributes, dryRun, force); err != nil || !confirmed { + return err + } + + if err := cmdCtx.AdminUpdaterExt().UpdateProjectAttributes(ctx, project, newMatchingAttributes); err != nil { + return fmt.Errorf("update project %s matchable attributes: update failed: %w", project, err) + } + + fmt.Printf("Updated attributes from %s project\n", project) + return nil +} + +func updateProjectDomainMatchableAttributes( + ctx context.Context, + cmdCtx cmdCore.CommandContext, + project, domain string, + resourceType admin.MatchableResource, 
+ attributeDecorator sconfig.MatchableAttributeDecorator, + dryRun bool, + force bool, +) error { + if project == "" { + panic("project is empty") + } + if domain == "" { + panic("domain is empty") + } + + response, err := cmdCtx.AdminFetcherExt().FetchProjectDomainAttributes(ctx, project, domain, resourceType) + if err != nil && !ext.IsNotFoundError(err) { + return fmt.Errorf("update project %s domain %s matchable attributes: could not fetch attributes: %w", project, domain, err) + } + + oldMatchingAttributes := response.GetAttributes().GetMatchingAttributes() + newMatchingAttributes := attributeDecorator.Decorate() + + if confirmed, err := confirmMatchableAttributeUpdate(oldMatchingAttributes, newMatchingAttributes, dryRun, force); err != nil || !confirmed { + return err + } + + if err := cmdCtx.AdminUpdaterExt().UpdateProjectDomainAttributes(ctx, project, domain, newMatchingAttributes); err != nil { + return fmt.Errorf("update project %s domain %s matchable attributes: update failed: %w", project, domain, err) + } + + fmt.Printf("Updated attributes from %s project and domain %s\n", project, domain) + return nil +} + +func updateWorkflowMatchableAttributes( + ctx context.Context, + cmdCtx cmdCore.CommandContext, + project, domain, workflow string, + resourceType admin.MatchableResource, + attributeDecorator sconfig.MatchableAttributeDecorator, + dryRun bool, + force bool, +) error { + if project == "" { + panic("project is empty") + } + if domain == "" { + panic("domain is empty") + } + if workflow == "" { + panic("workflow is empty") + } + + response, err := cmdCtx.AdminFetcherExt().FetchWorkflowAttributes(ctx, project, domain, workflow, resourceType) + if err != nil && !ext.IsNotFoundError(err) { + return fmt.Errorf("update project %s domain %s workflow %s matchable attributes: could not fetch attributes: %w", project, domain, workflow, err) + } + + oldMatchingAttributes := response.GetAttributes().GetMatchingAttributes() + newMatchingAttributes := 
attributeDecorator.Decorate() + + if confirmed, err := confirmMatchableAttributeUpdate(oldMatchingAttributes, newMatchingAttributes, dryRun, force); err != nil || !confirmed { + return err + } + + if err := cmdCtx.AdminUpdaterExt().UpdateWorkflowAttributes(ctx, project, domain, workflow, newMatchingAttributes); err != nil { + return fmt.Errorf("update project %s domain %s workflow %s matchable attributes: update failed: %w", project, domain, workflow, err) + } + + fmt.Printf("Updated attributes from %s project and domain %s and workflow %s\n", project, domain, workflow) + return nil +} + +func confirmMatchableAttributeUpdate(old, new *admin.MatchingAttributes, dryRun, force bool) (bool, error) { + patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, old.GetTarget(), new.GetTarget()) + if err != nil { + return false, fmt.Errorf("update matchable attributes: %w", err) + } + + if patch == "" { + fmt.Printf("No changes detected. Skipping the update.\n") + return false, nil + } + + fmt.Printf("The following changes are to be applied.\n%s\n", patch) + + if dryRun { + fmt.Printf("Skipping update request (dryRun)\n") + return false, nil + } + + if !force && !cmdUtil.AskForConfirmation("Continue?", os.Stdin) { + return false, fmt.Errorf("update aborted by user") + } + + return true, nil +} diff --git a/flytectl/cmd/update/matchable_cluster_resource_attribute.go b/flytectl/cmd/update/matchable_cluster_resource_attribute.go new file mode 100644 index 0000000000..51f5b64922 --- /dev/null +++ b/flytectl/cmd/update/matchable_cluster_resource_attribute.go @@ -0,0 +1,81 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + clusterResourceAttributesShort = "Update matchable resources of cluster attributes" + 
clusterResourceAttributesLong = `
+Update cluster resource attributes for given project and domain combination or additionally with workflow name.
+
+Updating the cluster resource attributes is only available from a generated file. See the get section to generate this file.
+It takes input for cluster resource attributes from the config file cra.yaml.
+Example: content of cra.yaml:
+
+.. code-block:: yaml
+
+    domain: development
+    project: flytesnacks
+    attributes:
+      foo: "bar"
+      buzz: "lightyear"
+
+::
+
+ flytectl update cluster-resource-attribute --attrFile cra.yaml
+
+Update cluster resource attributes for a project, domain, and workflow combination. This will take precedence over any other
+resource attribute defined at the project domain level.
+This will completely overwrite any existing custom project, domain and workflow combination attributes.
+It is preferable to run get and generate an attribute file if there is an existing attribute already set, and then update it to have new values.
+Refer to the get cluster-resource-attribute section on how to generate this file.
+For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is:
+
+.. 
code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + attributes: + foo: "bar" + buzz: "lightyear" + +:: + + flytectl update cluster-resource-attribute --attrFile cra.yaml + +Usage + +` +) + +func updateClusterResourceAttributesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + updateConfig := clusterresourceattribute.DefaultUpdateConfig + if len(updateConfig.AttrFile) == 0 { + return fmt.Errorf("attrFile is mandatory while calling update for cluster resource attribute") + } + + clustrResourceAttrFileConfig := clusterresourceattribute.AttrFileConfig{} + if err := sconfig.ReadConfigFromFile(&clustrResourceAttrFileConfig, updateConfig.AttrFile); err != nil { + return err + } + + // Get project domain workflow name from the read file. + project := clustrResourceAttrFileConfig.Project + domain := clustrResourceAttrFileConfig.Domain + workflowName := clustrResourceAttrFileConfig.Workflow + + if err := DecorateAndUpdateMatchableAttr(ctx, cmdCtx, project, domain, workflowName, + admin.MatchableResource_CLUSTER_RESOURCE, clustrResourceAttrFileConfig, + updateConfig.DryRun, updateConfig.Force); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go new file mode 100644 index 0000000000..f5d1c6c4e0 --- /dev/null +++ b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go @@ -0,0 +1,571 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +const ( + validWorkflowClusterResourceAttributesFilePath = 
"testdata/valid_workflow_cluster_attribute.yaml" + validProjectDomainClusterResourceAttributesFilePath = "testdata/valid_project_domain_cluster_attribute.yaml" + validProjectClusterResourceAttributesFilePath = "testdata/valid_project_cluster_attribute.yaml" +) + +func TestClusterResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "attrFile is mandatory") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestClusterResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataNonExistentFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "unable to read from testdata/non-existent-file yaml file") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestClusterResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataInvalidAttrFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\"") + s.UpdaterExt.AssertNotCalled(t, 
"FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + 
s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func 
TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { + t.Run("workflow without --force", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + 
config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("workflow with --force", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain without --force", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain with --force", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, 
"UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project without --force", func(t *testing.T) { + testProjectClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project with --force", func(t *testing.T) { + testProjectClusterResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. 
+ OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). 
+ Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectClusterResourceAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func testWorkflowClusterResourceAttributeUpdate( + setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testWorkflowClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + setup, + asserter, + ) +} + +func testWorkflowClusterResourceAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), + setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} + target := newTestWorkflowClusterResourceAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) + } + + err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} +} + +func newTestWorkflowClusterResourceAttribute() *admin.WorkflowAttributes { + return &admin.WorkflowAttributes{ + // project, domain, and workflow names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + Workflow: "core.control_flow.merge_sort.merge_sort", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: &admin.ClusterResourceAttributes{ + Attributes: map[string]string{ + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + }, + }, + }, + }, + } +} + +func testProjectClusterResourceAttributeUpdate( + setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target 
*admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectClusterResourceAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), + setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} + target := newTestProjectClusterResourceAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) + } + + err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} +} + +func newTestProjectClusterResourceAttribute() *admin.ProjectAttributes { + return &admin.ProjectAttributes{ + // project name needs to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: &admin.ClusterResourceAttributes{ + Attributes: map[string]string{ + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + }, + }, + }, + }, + } +} + +func testProjectDomainClusterResourceAttributeUpdate( + setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target 
*admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), + setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} + target := newTestProjectDomainClusterResourceAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) + } + + err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} +} + +func newTestProjectDomainClusterResourceAttribute() *admin.ProjectDomainAttributes { + return &admin.ProjectDomainAttributes{ + // project and domain names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterResourceAttributes{ + ClusterResourceAttributes: &admin.ClusterResourceAttributes{ + Attributes: map[string]string{ + 
testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + }, + }, + }, + } +} diff --git a/flytectl/cmd/update/matchable_execution_cluster_label.go b/flytectl/cmd/update/matchable_execution_cluster_label.go new file mode 100644 index 0000000000..08b0d7424e --- /dev/null +++ b/flytectl/cmd/update/matchable_execution_cluster_label.go @@ -0,0 +1,74 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + executionClusterLabelShort = "Update matchable resources of execution cluster label" + executionClusterLabelLong = ` +Update execution cluster label for the given project and domain combination or additionally with workflow name. + +Updating the execution cluster label is only available from a generated file. See the get section to generate this file. +It takes input for execution cluster label from the config file ecl.yaml. +Example: content of ecl.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + value: foo + +:: + + flytectl update execution-cluster-label --attrFile ecl.yaml + +Update execution cluster label for project, domain, and workflow combination. This will take precedence over any other +execution cluster label defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +..
code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + value: foo + +:: + + flytectl update execution-cluster-label --attrFile ecl.yaml + +Usage + +` +) + +func updateExecutionClusterLabelFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + updateConfig := executionclusterlabel.DefaultUpdateConfig + if len(updateConfig.AttrFile) == 0 { + return fmt.Errorf("attrFile is mandatory while calling update for execution cluster label") + } + + executionClusterLabelFileConfig := executionclusterlabel.FileConfig{} + if err := sconfig.ReadConfigFromFile(&executionClusterLabelFileConfig, updateConfig.AttrFile); err != nil { + return err + } + + // Get project domain workflow name from the read file. + project := executionClusterLabelFileConfig.Project + domain := executionClusterLabelFileConfig.Domain + workflowName := executionClusterLabelFileConfig.Workflow + + if err := DecorateAndUpdateMatchableAttr(ctx, cmdCtx, project, domain, workflowName, + admin.MatchableResource_EXECUTION_CLUSTER_LABEL, executionClusterLabelFileConfig, + updateConfig.DryRun, updateConfig.Force); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/update/matchable_execution_cluster_label_test.go b/flytectl/cmd/update/matchable_execution_cluster_label_test.go new file mode 100644 index 0000000000..9aed3ebc96 --- /dev/null +++ b/flytectl/cmd/update/matchable_execution_cluster_label_test.go @@ -0,0 +1,559 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +const ( + validProjectExecutionClusterLabelFilePath = "testdata/valid_project_execution_cluster_label.yaml" + 
validProjectDomainExecutionClusterLabelFilePath = "testdata/valid_project_domain_execution_cluster_label.yaml" + validWorkflowExecutionClusterLabelFilePath = "testdata/valid_workflow_execution_cluster_label.yaml" +) + +func TestExecutionClusterLabelUpdateRequiresAttributeFile(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "attrFile is mandatory") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestExecutionClusterLabelUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataNonExistentFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "unable to read from testdata/non-existent-file yaml file") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestExecutionClusterLabelUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataInvalidAttrFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\"") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, 
mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + 
s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + 
testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { + t.Run("workflow without --force", func(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) 
{ + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("workflow with --force", func(t *testing.T) { + testWorkflowExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain without --force", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain with --force", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project without --force", func(t *testing.T) { + testProjectExecutionClusterLabelUpdate( + /* setup */ func(s 
*testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project with --force", func(t *testing.T) { + testProjectExecutionClusterLabelUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. 
+ OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). 
+ Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionClusterLabelFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func testWorkflowExecutionClusterLabelUpdate( + setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testWorkflowExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + setup, + asserter, + ) +} + +func testWorkflowExecutionClusterLabelUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), + setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} + target := newTestWorkflowExecutionClusterLabel() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, executionclusterlabel.DefaultUpdateConfig, target) + } + + err := updateExecutionClusterLabelFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} +} + +func newTestWorkflowExecutionClusterLabel() *admin.WorkflowAttributes { + return &admin.WorkflowAttributes{ + // project, domain, and workflow names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + Workflow: "core.control_flow.merge_sort.merge_sort", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: &admin.ExecutionClusterLabel{ + Value: testutils.RandomName(12), + }, + }, + }, + } +} + +func testProjectExecutionClusterLabelUpdate( + setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). 
+ Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectExecutionClusterLabelUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), + setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} + target := newTestProjectExecutionClusterLabel() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, executionclusterlabel.DefaultUpdateConfig, target) + } + + err := updateExecutionClusterLabelFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} +} + +func newTestProjectExecutionClusterLabel() *admin.ProjectAttributes { + return &admin.ProjectAttributes{ + // project name needs to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: &admin.ExecutionClusterLabel{ + Value: testutils.RandomName(12), + }, + }, + }, + } +} + +func testProjectDomainExecutionClusterLabelUpdate( + setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), + setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} + target := newTestProjectDomainExecutionClusterLabel() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, executionclusterlabel.DefaultUpdateConfig, target) + } + + err := updateExecutionClusterLabelFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} +} + +func newTestProjectDomainExecutionClusterLabel() *admin.ProjectDomainAttributes { + return &admin.ProjectDomainAttributes{ + // project and domain names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionClusterLabel{ + ExecutionClusterLabel: &admin.ExecutionClusterLabel{ + Value: testutils.RandomName(12), + }, + }, + }, + } +} diff --git a/flytectl/cmd/update/matchable_execution_queue_attribute.go b/flytectl/cmd/update/matchable_execution_queue_attribute.go new file mode 100644 index 0000000000..8af5bd7762 --- /dev/null +++ b/flytectl/cmd/update/matchable_execution_queue_attribute.go @@ -0,0 +1,85 @@ 
+package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + executionQueueAttributesShort = "Update matchable resources of execution queue attributes" + executionQueueAttributesLong = ` +Update execution queue attributes for the given project and domain combination or additionally with workflow name. + +Updating the execution queue attribute is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing custom project, domain, and workflow combination attributes. +It is preferable to do get and generate an attribute file if there is an existing attribute that is already set and then update it to have new values. +Refer to the get execution-queue-attribute section on how to generate this file. +It takes input for execution queue attributes from the config file era.yaml. +Example: content of era.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + tags: + - foo + - bar + - buzz + - lightyear + +:: + + flytectl update execution-queue-attribute --attrFile era.yaml + +Update execution queue attribute for project, domain, and workflow combination. This will take precedence over any other +execution queue attribute defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +..
code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + tags: + - foo + - bar + - buzz + - lightyear + +:: + + flytectl update execution-queue-attribute --attrFile era.yaml + +Usage + +` +) + +func updateExecutionQueueAttributesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + updateConfig := executionqueueattribute.DefaultUpdateConfig + if len(updateConfig.AttrFile) == 0 { + return fmt.Errorf("attrFile is mandatory while calling update for execution queue attribute") + } + + executionQueueAttrFileConfig := executionqueueattribute.AttrFileConfig{} + if err := sconfig.ReadConfigFromFile(&executionQueueAttrFileConfig, updateConfig.AttrFile); err != nil { + return err + } + + // Get project domain workflow name from the read file. + project := executionQueueAttrFileConfig.Project + domain := executionQueueAttrFileConfig.Domain + workflowName := executionQueueAttrFileConfig.Workflow + + if err := DecorateAndUpdateMatchableAttr(ctx, cmdCtx, project, domain, workflowName, + admin.MatchableResource_EXECUTION_QUEUE, executionQueueAttrFileConfig, + updateConfig.DryRun, updateConfig.Force); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go new file mode 100644 index 0000000000..61638b7d51 --- /dev/null +++ b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go @@ -0,0 +1,571 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +const ( + validWorkflowExecutionQueueMatchableAttributesFilePath = 
"testdata/valid_workflow_execution_queue_attribute.yaml" + validProjectDomainExecutionQueueMatchableAttributeFilePath = "testdata/valid_project_domain_execution_queue_attribute.yaml" + validProjectExecutionQueueMatchableAttributeFilePath = "testdata/valid_project_execution_queue_attribute.yaml" +) + +func TestExecutionQueueAttributeUpdateRequiresAttributeFile(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "attrFile is mandatory") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataNonExistentFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "unable to read from testdata/non-existent-file yaml file") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataInvalidAttrFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\"") + 
s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, 
err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, 
mock.Anything) + }) + }) +} + +func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { + t.Run("workflow without --force", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, 
target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("workflow with --force", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain without --force", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain with --force", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, 
err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project without --force", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project with --force", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. 
+ OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). 
+ Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func testWorkflowExecutionQueueAttributeUpdate( + setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + setup, + asserter, + ) +} + +func testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), + setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} + target := newTestWorkflowExecutionQueueAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, executionqueueattribute.DefaultUpdateConfig, target) + } + + err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} +} + +func newTestWorkflowExecutionQueueAttribute() *admin.WorkflowAttributes { + return &admin.WorkflowAttributes{ + // project, domain, and workflow names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + Workflow: "core.control_flow.merge_sort.merge_sort", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionQueueAttributes{ + ExecutionQueueAttributes: &admin.ExecutionQueueAttributes{ + Tags: []string{ + testutils.RandomName(5), + testutils.RandomName(5), + testutils.RandomName(5), + }, + }, + }, + }, + } +} + +func testProjectExecutionQueueAttributeUpdate( + setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. 
+ OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectExecutionQueueAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), + setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} + target := newTestProjectExecutionQueueAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, executionqueueattribute.DefaultUpdateConfig, target) + } + + err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} +} + +func newTestProjectExecutionQueueAttribute() *admin.ProjectAttributes { + return &admin.ProjectAttributes{ + // project name needs to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionQueueAttributes{ + ExecutionQueueAttributes: &admin.ExecutionQueueAttributes{ + Tags: []string{ + testutils.RandomName(5), + testutils.RandomName(5), + testutils.RandomName(5), + }, + }, + }, + }, + } +} + +func testProjectDomainExecutionQueueAttributeUpdate( + setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + /* mockSetup 
*/ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), + setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} + target := newTestProjectDomainExecutionQueueAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, executionqueueattribute.DefaultUpdateConfig, target) + } + + err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} +} + +func newTestProjectDomainExecutionQueueAttribute() *admin.ProjectDomainAttributes { + return &admin.ProjectDomainAttributes{ + // project and domain names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ExecutionQueueAttributes{ + ExecutionQueueAttributes: &admin.ExecutionQueueAttributes{ + Tags: []string{ + testutils.RandomName(5), + testutils.RandomName(5), + testutils.RandomName(5), + }, + }, + }, + }, + } +} diff --git a/flytectl/cmd/update/matchable_plugin_override.go 
b/flytectl/cmd/update/matchable_plugin_override.go new file mode 100644 index 0000000000..c9f6ebe9aa --- /dev/null +++ b/flytectl/cmd/update/matchable_plugin_override.go @@ -0,0 +1,87 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + pluginOverrideShort = "Update matchable resources of plugin overrides" + pluginOverrideLong = ` +Update plugin overrides for given project and domain combination or additionally with workflow name. + +Updating to the plugin override is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing plugins overrides on custom project, domain, and workflow combination. +It is preferable to do get and generate a plugin override file if there is an existing override already set and then update it to have new values. +Refer to get plugin-override section on how to generate this file +It takes input for plugin overrides from the config file po.yaml, +Example: content of po.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT + +:: + + flytectl update plugin-override --attrFile po.yaml + +Update plugin override for project, domain, and workflow combination. This will take precedence over any other +plugin overrides defined at project domain level. 
+For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT + +:: + + flytectl update plugin-override --attrFile po.yaml + +Usage + +` +) + +func updatePluginOverridesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + updateConfig := pluginoverride.DefaultUpdateConfig + if len(updateConfig.AttrFile) == 0 { + return fmt.Errorf("attrFile is mandatory while calling update for plugin override") + } + + pluginOverrideFileConfig := pluginoverride.FileConfig{} + if err := sconfig.ReadConfigFromFile(&pluginOverrideFileConfig, updateConfig.AttrFile); err != nil { + return err + } + + // Get project domain workflow name from the read file. 
+ project := pluginOverrideFileConfig.Project + domain := pluginOverrideFileConfig.Domain + workflowName := pluginOverrideFileConfig.Workflow + + if err := DecorateAndUpdateMatchableAttr(ctx, cmdCtx, project, domain, workflowName, + admin.MatchableResource_PLUGIN_OVERRIDE, pluginOverrideFileConfig, + updateConfig.DryRun, updateConfig.Force); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/update/matchable_plugin_override_test.go b/flytectl/cmd/update/matchable_plugin_override_test.go new file mode 100644 index 0000000000..2b6e2e7f6b --- /dev/null +++ b/flytectl/cmd/update/matchable_plugin_override_test.go @@ -0,0 +1,589 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + pluginoverride "github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +const ( + validProjectPluginOverrideFilePath = "testdata/valid_project_plugin_override.yaml" + validProjectDomainPluginOverrideFilePath = "testdata/valid_project_domain_plugin_override.yaml" + validWorkflowPluginOverrideFilePath = "testdata/valid_workflow_plugin_override.yaml" +) + +func TestPluginOverrideUpdateRequiresAttributeFile(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "attrFile is mandatory") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestPluginOverrideUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataNonExistentFile + config.Force = true + 
}, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "unable to read from testdata/non-existent-file yaml file") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestPluginOverrideUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataInvalidAttrFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\"") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestPluginOverrideUpdateHappyPath(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config 
*pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) 
{ + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectPluginOverrideFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) 
{ + config.AttrFile = validProjectPluginOverrideFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { + t.Run("workflow without --force", func(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("workflow with --force", func(t *testing.T) { + testWorkflowPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain without --force", func(t *testing.T) { + testProjectDomainPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + 
}) + + t.Run("domain with --force", func(t *testing.T) { + testProjectDomainPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project without --force", func(t *testing.T) { + testProjectPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectPluginOverrideFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project with --force", func(t *testing.T) { + testProjectPluginOverrideUpdate( + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectPluginOverrideFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). 
+ Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. 
+ OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). 
+ Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectPluginOverrideFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func testWorkflowPluginOverrideUpdate( + setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testWorkflowPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + setup, + asserter, + ) +} + +func testWorkflowPluginOverrideUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), + setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} + target := newTestWorkflowPluginOverride() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, pluginoverride.DefaultUpdateConfig, target) + } + + err := updatePluginOverridesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} +} + +func newTestWorkflowPluginOverride() *admin.WorkflowAttributes { + return &admin.WorkflowAttributes{ + // project, domain, and workflow names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + Workflow: "core.control_flow.merge_sort.merge_sort", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: &admin.PluginOverrides{ + Overrides: []*admin.PluginOverride{ + { + TaskType: testutils.RandomName(15), + PluginId: []string{ + testutils.RandomName(12), + testutils.RandomName(12), + testutils.RandomName(12), + }, + MissingPluginBehavior: admin.PluginOverride_FAIL, + }, + }, + }, + }, + }, + } +} + +func testProjectPluginOverrideUpdate( + setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectPluginOverrideUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. 
+ OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectPluginOverrideUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), + setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} + target := newTestProjectPluginOverride() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, pluginoverride.DefaultUpdateConfig, target) + } + + err := updatePluginOverridesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} +} + +func newTestProjectPluginOverride() *admin.ProjectAttributes { + return &admin.ProjectAttributes{ + // project name needs to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: &admin.PluginOverrides{ + Overrides: []*admin.PluginOverride{ + { + TaskType: testutils.RandomName(15), + PluginId: []string{ + testutils.RandomName(12), + testutils.RandomName(12), + testutils.RandomName(12), + }, + MissingPluginBehavior: admin.PluginOverride_FAIL, + }, + }, + }, + }, + }, + } +} + +func testProjectDomainPluginOverrideUpdate( + setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectDomainPluginOverrideUpdateWithMockSetup( + /* 
mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectDomainPluginOverrideUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), + setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} + target := newTestProjectDomainPluginOverride() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, pluginoverride.DefaultUpdateConfig, target) + } + + err := updatePluginOverridesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} +} + +func newTestProjectDomainPluginOverride() *admin.ProjectDomainAttributes { + return &admin.ProjectDomainAttributes{ + // project and domain names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_PluginOverrides{ + PluginOverrides: &admin.PluginOverrides{ + Overrides: []*admin.PluginOverride{ + { + TaskType: testutils.RandomName(15), + PluginId: []string{ + testutils.RandomName(12), + testutils.RandomName(12), + testutils.RandomName(12), + }, + MissingPluginBehavior: admin.PluginOverride_FAIL, + }, + }, + }, + }, + }, + } +} diff --git 
a/flytectl/cmd/update/matchable_task_resource_attribute.go b/flytectl/cmd/update/matchable_task_resource_attribute.go new file mode 100644 index 0000000000..3e4282defc --- /dev/null +++ b/flytectl/cmd/update/matchable_task_resource_attribute.go @@ -0,0 +1,87 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + taskResourceAttributesShort = "Update matchable resources of task attributes" + taskResourceAttributesLong = ` +Updates the task resource attributes for the given project and domain combination or additionally with workflow name. + +Updating the task resource attribute is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing custom project, domain, and workflow combination attributes. +It is preferable to do get and generate an attribute file if there is an existing attribute already set and then update it to have new values. +Refer to get task-resource-attribute section on how to generate this file. +It takes input for task resource attributes from the config file tra.yaml, +Example: content of tra.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +:: + + flytectl update task-resource-attribute --attrFile tra.yaml + +Update task resource attribute for project, domain, and workflow combination. This will take precedence over any other +resource attribute defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. 
code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +:: + + flytectl update task-resource-attribute --attrFile tra.yaml + +Usage + +` +) + +func updateTaskResourceAttributesFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + updateConfig := taskresourceattribute.DefaultUpdateConfig + if len(updateConfig.AttrFile) == 0 { + return fmt.Errorf("attrFile is mandatory while calling update for task resource attribute") + } + + taskResourceAttrFileConfig := taskresourceattribute.TaskResourceAttrFileConfig{} + if err := sconfig.ReadConfigFromFile(&taskResourceAttrFileConfig, updateConfig.AttrFile); err != nil { + return err + } + + // Get project domain workflow name from the read file. + project := taskResourceAttrFileConfig.Project + domain := taskResourceAttrFileConfig.Domain + workflowName := taskResourceAttrFileConfig.Workflow + + if err := DecorateAndUpdateMatchableAttr(ctx, cmdCtx, project, domain, workflowName, + admin.MatchableResource_TASK_RESOURCE, taskResourceAttrFileConfig, + updateConfig.DryRun, updateConfig.Force); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/update/matchable_task_resource_attribute_test.go b/flytectl/cmd/update/matchable_task_resource_attribute_test.go new file mode 100644 index 0000000000..cad06fa7e7 --- /dev/null +++ b/flytectl/cmd/update/matchable_task_resource_attribute_test.go @@ -0,0 +1,568 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +const ( + validProjectTaskAttributesFilePath = 
"testdata/valid_project_task_attribute.yaml" + validProjectDomainTaskAttributesFilePath = "testdata/valid_project_domain_task_attribute.yaml" + validWorkflowTaskAttributesFilePath = "testdata/valid_workflow_task_attribute.yaml" +) + +func TestTaskResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "attrFile is mandatory") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestTaskResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataNonExistentFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "unable to read from testdata/non-existent-file yaml file") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestTaskResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataInvalidAttrFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\"") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, 
mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + 
s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ 
func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { + t.Run("workflow without --force", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, 
"UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("workflow with --force", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain without --force", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain with --force", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project without --force", func(t *testing.T) { + testProjectTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target 
*admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project with --force", func(t *testing.T) { + testProjectTaskResourceAttributeUpdate( + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. 
+ OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). 
+ Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectTaskAttributesFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func testWorkflowTaskResourceAttributeUpdate( + setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testWorkflowTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. 
+ OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testWorkflowTaskResourceAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), + setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} + target := newTestWorkflowTaskResourceAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, taskresourceattribute.DefaultUpdateConfig, target) + } + + err := updateTaskResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} +} + +func newTestWorkflowTaskResourceAttribute() *admin.WorkflowAttributes { + return &admin.WorkflowAttributes{ + // project, domain, and workflow names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + Workflow: "core.control_flow.merge_sort.merge_sort", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: &admin.TaskResourceAttributes{ + Defaults: &admin.TaskResourceSpec{ + Cpu: testutils.RandomName(2), + Memory: testutils.RandomName(5), + }, + }, + }, + }, + } +} + +func testProjectTaskResourceAttributeUpdate( + setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), 
+ asserter func(s *testutils.TestStruct, err error), +) { + testProjectTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectTaskResourceAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), + setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} + target := newTestProjectTaskResourceAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, taskresourceattribute.DefaultUpdateConfig, target) + } + + err := updateTaskResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} +} + +func newTestProjectTaskResourceAttribute() *admin.ProjectAttributes { + return &admin.ProjectAttributes{ + // project name needs to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: &admin.TaskResourceAttributes{ + Defaults: &admin.TaskResourceSpec{ + Cpu: testutils.RandomName(2), + Memory: testutils.RandomName(5), + }, + }, + }, + }, + } +} + +func testProjectDomainTaskResourceAttributeUpdate( + setup func(s *testutils.TestStruct, config 
*taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), + setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} + target := newTestProjectDomainTaskResourceAttribute() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, taskresourceattribute.DefaultUpdateConfig, target) + } + + err := updateTaskResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} +} + +func newTestProjectDomainTaskResourceAttribute() *admin.ProjectDomainAttributes { + return &admin.ProjectDomainAttributes{ + // project and domain names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{ + TaskResourceAttributes: &admin.TaskResourceAttributes{ + Defaults: &admin.TaskResourceSpec{ + Cpu: 
testutils.RandomName(2), + Memory: testutils.RandomName(5), + }, + }, + }, + }, + } +} diff --git a/flytectl/cmd/update/matchable_workflow_execution_config.go b/flytectl/cmd/update/matchable_workflow_execution_config.go new file mode 100644 index 0000000000..2921dbcf17 --- /dev/null +++ b/flytectl/cmd/update/matchable_workflow_execution_config.go @@ -0,0 +1,84 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + + sconfig "github.com/flyteorg/flytectl/cmd/config/subcommand" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + workflowExecutionConfigShort = "Updates matchable resources of workflow execution config" + workflowExecutionConfigLong = ` +Updates the workflow execution config for the given project and domain combination or additionally with workflow name. + +Updating the workflow execution config is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing custom project and domain and workflow combination execution config. +It is preferable to do get and generate a config file if there is an existing execution config already set and then update it to have new values. +Refer to get workflow-execution-config section on how to generate this file. +It takes input for workflow execution config from the config file wec.yaml, +Example: content of wec.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + max_parallelism: 5 + security_context: + run_as: + k8s_service_account: demo + +:: + + flytectl update workflow-execution-config --attrFile wec.yaml + +Update workflow execution config for project, domain, and workflow combination. This will take precedence over any other +execution config defined at project domain level. 
+For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + max_parallelism: 5 + security_context: + run_as: + k8s_service_account: mergesortsa + +:: + + flytectl update workflow-execution-config --attrFile wec.yaml + +Usage + +` +) + +func updateWorkflowExecutionConfigFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + updateConfig := workflowexecutionconfig.DefaultUpdateConfig + if len(updateConfig.AttrFile) == 0 { + return fmt.Errorf("attrFile is mandatory while calling update for workflow execution config") + } + + workflowExecutionConfigFileConfig := workflowexecutionconfig.FileConfig{} + if err := sconfig.ReadConfigFromFile(&workflowExecutionConfigFileConfig, updateConfig.AttrFile); err != nil { + return err + } + + // Get project domain workflow name from the read file. + project := workflowExecutionConfigFileConfig.Project + domain := workflowExecutionConfigFileConfig.Domain + workflowName := workflowExecutionConfigFileConfig.Workflow + + if err := DecorateAndUpdateMatchableAttr(ctx, cmdCtx, project, domain, workflowName, + admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG, workflowExecutionConfigFileConfig, + updateConfig.DryRun, updateConfig.Force); err != nil { + return err + } + return nil +} diff --git a/flytectl/cmd/update/matchable_workflow_execution_config_test.go b/flytectl/cmd/update/matchable_workflow_execution_config_test.go new file mode 100644 index 0000000000..3fd198e56a --- /dev/null +++ b/flytectl/cmd/update/matchable_workflow_execution_config_test.go @@ -0,0 +1,580 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + 
"github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" +) + +const ( + validProjectWorkflowExecutionConfigFilePath = "testdata/valid_project_workflow_execution_config.yaml" + validProjectDomainWorkflowExecutionConfigFilePath = "testdata/valid_project_domain_workflow_execution_config.yaml" + validWorkflowExecutionConfigFilePath = "testdata/valid_workflow_workflow_execution_config.yaml" +) + +func TestWorkflowExecutionConfigUpdateRequiresAttributeFile(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "attrFile is mandatory") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataNonExistentFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "unable to read from testdata/non-existent-file yaml file") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = testDataInvalidAttrFile + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, 
"error unmarshaling JSON: while decoding JSON: json: unknown field \"InvalidDomain\"") + s.UpdaterExt.AssertNotCalled(t, "FetchWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) +} + +func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.Force = true + }, + 
/* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionConfigFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, 
mock.Anything) + }) + }) +} + +func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionConfigFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { + t.Run("workflow without --force", func(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + 
config.AttrFile = validWorkflowExecutionConfigFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("workflow with --force", func(t *testing.T) { + testWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionConfigFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain without --force", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain with --force", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectDomainAttributes", mock.Anything, 
mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project without --force", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project with --force", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdate( + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertNotCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project and domain development`) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(nil, ext.NewNotFoundError("attribute")) + s.UpdaterExt. 
+ OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + s.TearDownAndVerifyContains(t, `Updated attributes from flytesnacks project`) + }) + }) +} + +func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { + t.Run("workflow", func(t *testing.T) { + testWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { + config.AttrFile = validWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateWorkflowAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("domain", func(t *testing.T) { + testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. 
+ OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { + config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectDomainAttributes", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + }) + }) + + t.Run("project", func(t *testing.T) { + testProjectWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). 
+ Return(fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { + config.AttrFile = validProjectWorkflowExecutionConfigFilePath + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.UpdaterExt.AssertCalled(t, "UpdateProjectAttributes", mock.Anything, mock.Anything, mock.Anything) + }) + }) +} + +func testWorkflowExecutionConfigUpdate( + setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { + s.FetcherExt. + OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(&admin.WorkflowAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, mock.Anything). 
+ Return(nil) + }, + setup, + asserter, + ) +} + +func testWorkflowExecutionConfigUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), + setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} + target := newTestWorkflowExecutionConfig() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, workflowexecutionconfig.DefaultUpdateConfig, target) + } + + err := updateWorkflowExecutionConfigFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} +} + +func newTestWorkflowExecutionConfig() *admin.WorkflowAttributes { + return &admin.WorkflowAttributes{ + // project, domain, and workflow names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + Workflow: "core.control_flow.merge_sort.merge_sort", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ + MaxParallelism: 1337, + Annotations: &admin.Annotations{ + Values: map[string]string{ + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + }, + }, + }, + }, + }, + } +} + +func testProjectWorkflowExecutionConfigUpdate( + setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target 
*admin.ProjectAttributes) { + s.FetcherExt. + OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(&admin.ProjectAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectAttributesMatch(s.Ctx, target.Project, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectWorkflowExecutionConfigUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), + setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} + target := newTestProjectWorkflowExecutionConfig() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, workflowexecutionconfig.DefaultUpdateConfig, target) + } + + err := updateWorkflowExecutionConfigFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} +} + +func newTestProjectWorkflowExecutionConfig() *admin.ProjectAttributes { + return &admin.ProjectAttributes{ + // project name needs to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ + MaxParallelism: 1337, + Annotations: &admin.Annotations{ + Values: map[string]string{ + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + }, + }, + }, + }, + }, + } +} + +func testProjectDomainWorkflowExecutionConfigUpdate( + setup func(s *testutils.TestStruct, config 
*workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { + s.FetcherExt. + OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). + Return(&admin.ProjectDomainAttributesGetResponse{Attributes: target}, nil) + s.UpdaterExt. + OnUpdateProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, mock.Anything). + Return(nil) + }, + setup, + asserter, + ) +} + +func testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), + setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} + target := newTestProjectDomainWorkflowExecutionConfig() + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, workflowexecutionconfig.DefaultUpdateConfig, target) + } + + err := updateWorkflowExecutionConfigFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} +} + +func newTestProjectDomainWorkflowExecutionConfig() *admin.ProjectDomainAttributes { + return &admin.ProjectDomainAttributes{ + // project and domain names need to be same as in the tests spec files in testdata folder + Project: "flytesnacks", + Domain: "development", + MatchingAttributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_WorkflowExecutionConfig{ + WorkflowExecutionConfig: &admin.WorkflowExecutionConfig{ + 
MaxParallelism: 1337, + Annotations: &admin.Annotations{ + Values: map[string]string{ + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + testutils.RandomName(5): testutils.RandomName(10), + }, + }, + }, + }, + }, + } +} diff --git a/flytectl/cmd/update/named_entity.go b/flytectl/cmd/update/named_entity.go new file mode 100644 index 0000000000..449639d751 --- /dev/null +++ b/flytectl/cmd/update/named_entity.go @@ -0,0 +1,102 @@ +package update + +import ( + "context" + "fmt" + "os" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/clierrors" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" +) + +//go:generate pflags NamedEntityConfig --default-var namedEntityConfig --bind-default-var + +var ( + namedEntityConfig = &NamedEntityConfig{} +) + +type NamedEntityConfig struct { + Archive bool `json:"archive" pflag:",archive named entity."` + Activate bool `json:"activate" pflag:",activate the named entity."` + Description string `json:"description" pflag:",description of the named entity."` + DryRun bool `json:"dryRun" pflag:",execute command without making any modifications."` + Force bool `json:"force" pflag:",do not ask for an acknowledgement during updates."` +} + +func (cfg NamedEntityConfig) UpdateNamedEntity(ctx context.Context, name string, project string, domain string, rsType core.ResourceType, cmdCtx cmdCore.CommandContext) error { + if cfg.Activate && cfg.Archive { + return fmt.Errorf(clierrors.ErrInvalidStateUpdate) + } + + id := &admin.NamedEntityIdentifier{ + Project: project, + Domain: domain, + Name: name, + } + + namedEntity, err := cmdCtx.AdminClient().GetNamedEntity(ctx, &admin.NamedEntityGetRequest{ + ResourceType: rsType, + Id: id, + }) + if err != nil { + return fmt.Errorf("update metadata for %s: could not fetch metadata: 
%w", name, err) + } + + oldMetadata, newMetadata := composeNamedMetadataEdits(cfg, namedEntity.Metadata) + patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, oldMetadata, newMetadata) + if err != nil { + panic(err) + } + if patch == "" { + fmt.Printf("No changes detected. Skipping the update.\n") + return nil + } + + fmt.Printf("The following changes are to be applied.\n%s\n", patch) + + if cfg.DryRun { + fmt.Printf("skipping UpdateNamedEntity request (dryRun)\n") + return nil + } + + if !cfg.Force && !cmdUtil.AskForConfirmation("Continue?", os.Stdin) { + return fmt.Errorf("update aborted by user") + } + + _, err = cmdCtx.AdminClient().UpdateNamedEntity(ctx, &admin.NamedEntityUpdateRequest{ + ResourceType: rsType, + Id: id, + Metadata: newMetadata, + }) + if err != nil { + return fmt.Errorf("update metadata for %s: update failed: %w", name, err) + } + + return nil +} + +func composeNamedMetadataEdits(config NamedEntityConfig, current *admin.NamedEntityMetadata) (old *admin.NamedEntityMetadata, new *admin.NamedEntityMetadata) { + old = &admin.NamedEntityMetadata{} + new = &admin.NamedEntityMetadata{} + + switch { + case config.Activate && config.Archive: + panic("cannot both activate and archive") + case config.Activate: + old.State = current.State + new.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + case config.Archive: + old.State = current.State + new.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + } + + if config.Description != "" { + old.Description = current.Description + new.Description = config.Description + } + + return old, new +} diff --git a/flytectl/cmd/update/named_entity_test.go b/flytectl/cmd/update/named_entity_test.go new file mode 100644 index 0000000000..b02d6a5086 --- /dev/null +++ b/flytectl/cmd/update/named_entity_test.go @@ -0,0 +1,97 @@ +package update + +import ( + "context" + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + 
"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/cmd/testutils" +) + +func testNamedEntityUpdate( + resourceType core.ResourceType, + setup func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity), + asserter func(s *testutils.TestStruct, err error), +) { + testNamedEntityUpdateWithMockSetup( + resourceType, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(namedEntity, nil) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). + Return(&admin.NamedEntityUpdateResponse{}, nil) + }, + setup, + asserter, + ) +} + +func testNamedEntityUpdateWithMockSetup( + resourceType core.ResourceType, + mockSetup func(s *testutils.TestStruct, namedEntity *admin.NamedEntity), + setup func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + config := &NamedEntityConfig{} + target := newTestNamedEntity(resourceType) + + if mockSetup != nil { + mockSetup(&s, target) + } + + if setup != nil { + setup(&s, config, target) + } + + updateMetadataFactory := getUpdateMetadataFactory(resourceType) + + args := []string{target.Id.Name} + err := updateMetadataFactory(config)(s.Ctx, args, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } +} + +func newTestNamedEntity(resourceType core.ResourceType) *admin.NamedEntity { + return &admin.NamedEntity{ + Id: &admin.NamedEntityIdentifier{ + Name: testutils.RandomName(12), + Project: config.GetConfig().Project, + Domain: config.GetConfig().Domain, + }, + ResourceType: resourceType, + Metadata: 
&admin.NamedEntityMetadata{ + State: admin.NamedEntityState_NAMED_ENTITY_ACTIVE, + Description: testutils.RandomName(50), + }, + } +} + +func getUpdateMetadataFactory(resourceType core.ResourceType) func(namedEntityConfig *NamedEntityConfig) func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + switch resourceType { + case core.ResourceType_LAUNCH_PLAN: + return getUpdateLPMetaFunc + case core.ResourceType_TASK: + return getUpdateTaskFunc + case core.ResourceType_WORKFLOW: + return getUpdateWorkflowFunc + } + + panic(fmt.Sprintf("no known mapping exists between resource type %s and "+ + "corresponding update metadata factory function", resourceType)) +} diff --git a/flytectl/cmd/update/namedentityconfig_flags.go b/flytectl/cmd/update/namedentityconfig_flags.go new file mode 100755 index 0000000000..2f1345bc98 --- /dev/null +++ b/flytectl/cmd/update/namedentityconfig_flags.go @@ -0,0 +1,59 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package update + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (NamedEntityConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (NamedEntityConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (NamedEntityConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in NamedEntityConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg NamedEntityConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("NamedEntityConfig", pflag.ExitOnError) + cmdFlags.BoolVar(&namedEntityConfig.Archive, fmt.Sprintf("%v%v", prefix, "archive"), namedEntityConfig.Archive, "archive named entity.") + cmdFlags.BoolVar(&namedEntityConfig.Activate, fmt.Sprintf("%v%v", prefix, "activate"), namedEntityConfig.Activate, "activate the named entity.") + cmdFlags.StringVar(&namedEntityConfig.Description, fmt.Sprintf("%v%v", prefix, "description"), namedEntityConfig.Description, "description of the named entity.") + cmdFlags.BoolVar(&namedEntityConfig.DryRun, fmt.Sprintf("%v%v", prefix, "dryRun"), namedEntityConfig.DryRun, "execute command without making any modifications.") + cmdFlags.BoolVar(&namedEntityConfig.Force, fmt.Sprintf("%v%v", prefix, "force"), namedEntityConfig.Force, "do not ask for an acknowledgement during updates.") + return cmdFlags +} diff --git a/flytectl/cmd/update/namedentityconfig_flags_test.go b/flytectl/cmd/update/namedentityconfig_flags_test.go new file mode 100755 index 0000000000..43cf00ec2a --- /dev/null +++ 
b/flytectl/cmd/update/namedentityconfig_flags_test.go @@ -0,0 +1,172 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package update + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsNamedEntityConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementNamedEntityConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsNamedEntityConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookNamedEntityConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementNamedEntityConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_NamedEntityConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookNamedEntityConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_NamedEntityConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_NamedEntityConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_NamedEntityConfig(val, result)) +} + +func testDecodeRaw_NamedEntityConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_NamedEntityConfig(vStringSlice, result)) +} + +func TestNamedEntityConfig_GetPFlagSet(t *testing.T) { + val := NamedEntityConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestNamedEntityConfig_SetFlags(t *testing.T) { + actual := NamedEntityConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_archive", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("archive", testValue) + if vBool, err := cmdFlags.GetBool("archive"); err == nil { + testDecodeJson_NamedEntityConfig(t, fmt.Sprintf("%v", vBool), &actual.Archive) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_activate", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("activate", testValue) + if vBool, err := cmdFlags.GetBool("activate"); err == nil { + testDecodeJson_NamedEntityConfig(t, fmt.Sprintf("%v", vBool), &actual.Activate) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_description", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("description", testValue) + if vString, err := cmdFlags.GetString("description"); err == nil { + testDecodeJson_NamedEntityConfig(t, fmt.Sprintf("%v", vString), &actual.Description) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dryRun", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dryRun", testValue) + if vBool, err := cmdFlags.GetBool("dryRun"); err == nil { + testDecodeJson_NamedEntityConfig(t, fmt.Sprintf("%v", vBool), &actual.DryRun) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_force", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("force", testValue) + if vBool, err := cmdFlags.GetBool("force"); err == nil { + testDecodeJson_NamedEntityConfig(t, fmt.Sprintf("%v", vBool), &actual.Force) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flytectl/cmd/update/project.go b/flytectl/cmd/update/project.go new file mode 100644 index 0000000000..bf883af450 --- /dev/null +++ b/flytectl/cmd/update/project.go @@ -0,0 +1,191 @@ +package update + +import ( + "context" + "fmt" + "os" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" +) + +const ( + projectShort = "Update the characteristics of a project" + projectLong = ` +Allows you to update the 
characteristics of a project, including its name, labels and description. +Also allows you to archive or activate (unarchive) a project. + +To archive a project, specify its ID with the *p* flag and add the *archive* flag: + +:: + + flytectl update project -p my-project-id --archive + +To activate (unarchive) an archived project, specify its ID with the *p* flag and add the *activate* flag: + +:: + + flytectl update project -p my-project-id --activate + +To update the characteristics of a project using flags, specify the project ID with the *p* flag and the flags corresponding to the characteristics you want to update: + +:: + + flytectl update project -p my-project-id --description "A wonderful project" --labels app=my-app + +To update the characteristics of a project using a *yaml* file, define the file with the project ID desired updates: + +.. code-block:: yaml + + id: "my-project-id" + name: "my-project-name" + labels: + values: + app: my-app + description: "A wonderful project" + + +(Note: The name parameter must not contain whitespace) + +Then, pass it in using the *file* flag: + +:: + + flytectl update project --file project.yaml + +To archive or activate (unarchive) a project using a *yaml* file: + +* Add a state field, with a value of *0* for activated (unarchived) or *1* for archived, at the top level of the the *yaml* file. + +* Add the *archive* flag to the command. + +For example, to archive a project: + +.. code-block:: yaml + + # update.yaml + id: "my-project-id" + state: 1 + +:: + + $ uctl update project --file update.yaml --archive + +And to activate (unarchive) the same project: + +.. code-block:: yaml + + # update.yaml + id: "my-project-id" + state: 0 + +:: + + $ uctl update project --file update.yaml --archive + +Note that when using a *yaml* file, the *activate* flag is not used. +Instead, the *archive* flag is used for *both* archiving and activating (unarchiving) with the difference being in the *state* field of the *yaml* file. 
+Furthermore, the *state* field only takes effect if the *archive* flag is present in the command. + +Usage +` +) + +func updateProjectsFunc(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + projectConfig := project.DefaultProjectConfig + + edits, err := projectConfig.GetProjectSpec(config.GetConfig()) + if err != nil { + return err + } + + if edits.Id == "" { + return fmt.Errorf(clierrors.ErrProjectNotPassed) + } + + currentProject, err := cmdCtx.AdminFetcherExt().GetProjectByID(ctx, edits.Id) + if err != nil { + return fmt.Errorf("update project %s: could not fetch project: %w", edits.Id, err) + } + + // We do not compare currentProject against edits directly, because edits does not + // have a complete set of project's fields - it will only contain fields that + // the update command allows updating. (For example, it won't have Domains field + // initialized.) + currentProjectWithEdits := copyProjectWithEdits(currentProject, edits, projectConfig) + patch, err := DiffAsYaml(diffPathBefore, diffPathAfter, currentProject, currentProjectWithEdits) + if err != nil { + panic(err) + } + if patch == "" { + fmt.Printf("No changes detected. Skipping the update.\n") + return nil + } + + fmt.Printf("The following changes are to be applied.\n%s\n", patch) + + if project.DefaultProjectConfig.DryRun { + fmt.Printf("skipping UpdateProject request (dryRun)\n") + return nil + } + + if !project.DefaultProjectConfig.Force && !cmdUtil.AskForConfirmation("Continue?", os.Stdin) { + return fmt.Errorf("update aborted by user") + } + + _, err = cmdCtx.AdminClient().UpdateProject(ctx, edits) + if err != nil { + return fmt.Errorf(clierrors.ErrFailedProjectUpdate, edits.Id, err) + } + + fmt.Printf("project %s updated\n", edits.Id) + return nil +} + +// Makes a shallow copy of target and applies certain properties from edited to it. +// The properties applied are only the ones supported by update command: state, name, +// description, labels, etc. 
+func copyProjectWithEdits(target *admin.Project, edited *admin.Project, projectConfig *project.ConfigProject) *admin.Project { + copy := *target + + if edited.Name != "" { + copy.Name = edited.Name + } + if edited.Description != "" { + copy.Description = edited.Description + } + if len(edited.GetLabels().GetValues()) != 0 { + copy.Labels = edited.Labels + } + + // `edited` comes with `admin.Project_ACTIVE` state by default + // if both `activate` and `archive` flags have not been set. + // + // This will overwrite state of `copy` if we directly set it + // without checking for flags, which will show up on the diff. + // + // Also, after showing the diff, the `edited` is used for updating + // the project, which comes with `Project_ACTIVE` by default + // unless overwritten. Therefore, on the `else` block, + // we overwrite the `edited` with the state of `copy` + // if both `archive` and `activate` flags are unset. + // + // This is a bit hacky IMO. Proper solution would be to + // refactor `project.ConfigProject` and this file in order to + // separate the logic of setting `ConfigProject` struct fields + // from creation of a 'default' project based on those flags. + // Having a proper order of precedence between global config, + // YAML file input, and the flags for `ConfigProject` would also + // be good. 
+	if projectConfig.Archive || projectConfig.Activate { +		copy.State = edited.State +	} else { +		edited.State = copy.State +	} +	return &copy +} diff --git a/flytectl/cmd/update/project_test.go b/flytectl/cmd/update/project_test.go new file mode 100644 index 0000000000..1ef2c7b267 --- /dev/null +++ b/flytectl/cmd/update/project_test.go @@ -0,0 +1,277 @@ +package update + +import ( +	"fmt" +	"testing" + +	"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +	"github.com/stretchr/testify/assert" +	"github.com/stretchr/testify/mock" + +	"github.com/flyteorg/flytectl/cmd/config" +	"github.com/flyteorg/flytectl/cmd/config/subcommand/project" +	"github.com/flyteorg/flytectl/cmd/testutils" +	"github.com/flyteorg/flytectl/pkg/ext" +) + +func TestProjectCanBeActivated(t *testing.T) { +	testProjectUpdate( +		/* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { +			project.State = admin.Project_ARCHIVED +			config.Activate = true +			config.Force = true +		}, +		/* assert */ func(s *testutils.TestStruct, err error) { +			assert.Nil(t, err) +			s.MockAdminClient.AssertCalled( +				t, "UpdateProject", s.Ctx, +				mock.MatchedBy( +					func(r *admin.Project) bool { +						return r.State == admin.Project_ACTIVE +					})) +		}) +} + +func TestProjectCanBeArchived(t *testing.T) { +	testProjectUpdate( +		/* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { +			project.State = admin.Project_ACTIVE +			config.Archive = true +			config.Force = true +		}, +		/* assert */ func(s *testutils.TestStruct, err error) { +			assert.Nil(t, err) +			s.MockAdminClient.AssertCalled( +				t, "UpdateProject", s.Ctx, +				mock.MatchedBy( +					func(r *admin.Project) bool { +						return r.State == admin.Project_ARCHIVED +					})) +		}) +} + +func TestProjectCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { +	testProjectUpdate( +		/* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { +			config.Activate = true + 
config.Archive = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "Specify either activate or archive") + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }) +} + +func TestProjectUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }) +} + +func TestProjectUpdateWithoutForceFlagFails(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ARCHIVED + config.Activate = true + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }) +} + +func TestProjectUpdateDoesNothingWithDryRunFlag(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ARCHIVED + config.Activate = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }) +} + +func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { + t.Run("without --force", func(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ARCHIVED + config.Activate = true + + config.Force = false + 
config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }) + }) + + t.Run("with --force", func(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ARCHIVED + config.Activate = true + + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }) + }) +} + +func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) { + testProjectUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { + s.FetcherExt. + OnGetProjectByID(s.Ctx, project.Id). + Return(nil, ext.NewNotFoundError("project not found")) + s.MockAdminClient. + OnUpdateProjectMatch(s.Ctx, mock.Anything). + Return(&admin.ProjectUpdateResponse{}, nil) + }, + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }, + ) +} + +func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) { + testProjectUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { + s.FetcherExt. + OnGetProjectByID(s.Ctx, project.Id). + Return(project, nil) + s.MockAdminClient. + OnUpdateProjectMatch(s.Ctx, mock.Anything). 
+ Return(nil, fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertCalled(t, "UpdateProject", mock.Anything, mock.Anything) + }, + ) +} + +func TestProjectUpdateRequiresProjectId(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + config.ID = "" + }, + func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "project id wasn't passed") + }) +} + +func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) { + testProjectUpdate( + /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { + project.State = admin.Project_ARCHIVED + config.Activate = false + config.Archive = false + config.Description = testutils.RandomName(12) + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateProject", s.Ctx, + mock.MatchedBy( + func(r *admin.Project) bool { + return r.State == admin.Project_ARCHIVED + })) + }) +} + +func testProjectUpdate( + setup func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project), + asserter func(s *testutils.TestStruct, err error), +) { + testProjectUpdateWithMockSetup( + /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { + s.FetcherExt. + OnGetProjectByID(s.Ctx, project.Id). + Return(project, nil) + s.MockAdminClient. + OnUpdateProjectMatch(s.Ctx, mock.Anything). 
+ Return(&admin.ProjectUpdateResponse{}, nil) + }, + setup, + asserter, + ) +} + +func testProjectUpdateWithMockSetup( + mockSetup func(s *testutils.TestStruct, project *admin.Project), + setup func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project), + asserter func(s *testutils.TestStruct, err error), +) { + s := testutils.Setup() + target := newTestProject() + + if mockSetup != nil { + mockSetup(&s, target) + } + + project.DefaultProjectConfig = &project.ConfigProject{ + ID: target.Id, + } + config.GetConfig().Project = "" + config.GetConfig().Domain = "" + if setup != nil { + setup(&s, project.DefaultProjectConfig, target) + } + + err := updateProjectsFunc(s.Ctx, nil, s.CmdCtx) + + if asserter != nil { + asserter(&s, err) + } + + // cleanup + project.DefaultProjectConfig = &project.ConfigProject{} + config.GetConfig().Project = "" + config.GetConfig().Domain = "" +} + +func newTestProject() *admin.Project { + return &admin.Project{ + Id: testutils.RandomName(12), + Name: testutils.RandomName(12), + State: admin.Project_ACTIVE, + Domains: []*admin.Domain{ + { + Id: testutils.RandomName(12), + Name: testutils.RandomName(12), + }, + }, + Description: testutils.RandomName(12), + Labels: &admin.Labels{ + Values: map[string]string{ + testutils.RandomName(5): testutils.RandomName(12), + testutils.RandomName(5): testutils.RandomName(12), + testutils.RandomName(5): testutils.RandomName(12), + }, + }, + } +} diff --git a/flytectl/cmd/update/task_meta.go b/flytectl/cmd/update/task_meta.go new file mode 100644 index 0000000000..302ceb801f --- /dev/null +++ b/flytectl/cmd/update/task_meta.go @@ -0,0 +1,53 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + updateTaskShort = "Update task metadata" + updateTaskLong = ` +Update the 
description on the task: +:: + + flytectl update task-meta -d development -p flytesnacks core.control_flow.merge_sort.merge --description "Merge sort example" + +Archiving task named entity is not supported and would throw an error: +:: + + flytectl update task-meta -d development -p flytesnacks core.control_flow.merge_sort.merge --archive + +Activating task named entity would be a noop since archiving is not possible: +:: + + flytectl update task-meta -d development -p flytesnacks core.control_flow.merge_sort.merge --activate + +Usage +` +) + +func getUpdateTaskFunc(namedEntityConfig *NamedEntityConfig) func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + return func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) != 1 { + return fmt.Errorf(clierrors.ErrTaskNotPassed) + } + + name := args[0] + err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_TASK, cmdCtx) + if err != nil { + fmt.Printf(clierrors.ErrFailedTaskUpdate, name, err) + return err + } + + fmt.Printf("updated metadata successfully on %v", name) + return nil + } +} diff --git a/flytectl/cmd/update/task_meta_test.go b/flytectl/cmd/update/task_meta_test.go new file mode 100644 index 0000000000..01d9f3c742 --- /dev/null +++ b/flytectl/cmd/update/task_meta_test.go @@ -0,0 +1,196 @@ +package update + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestTaskMetadataCanBeActivated(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, 
namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateNamedEntity", s.Ctx, + mock.MatchedBy( + func(r *admin.NamedEntityUpdateRequest) bool { + return r.GetMetadata().GetState() == admin.NamedEntityState_NAMED_ENTITY_ACTIVE + })) + }) +} + +func TestTaskMetadataCanBeArchived(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + config.Archive = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateNamedEntity", s.Ctx, + mock.MatchedBy( + func(r *admin.NamedEntityUpdateRequest) bool { + return r.GetMetadata().GetState() == admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + })) + }) +} + +func TestTaskMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + config.Activate = true + config.Archive = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "Specify either activate or archive") + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestTaskMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + config.Activate = true + config.Force = true + }, + /* assert 
*/ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestTaskMetadataUpdateWithoutForceFlagFails(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestTaskMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { + t.Run("without --force", func(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) + }) + + t.Run("with --force", func(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_TASK, + /* setup */ func(s 
*testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) + }) +} + +func TestTaskMetadataUpdateFailsWhenTaskDoesNotExist(t *testing.T) { + testNamedEntityUpdateWithMockSetup( + core.ResourceType_TASK, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(nil, ext.NewNotFoundError("named entity not found")) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). + Return(&admin.NamedEntityUpdateResponse{}, nil) + }, + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }, + ) +} + +func TestTaskMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { + testNamedEntityUpdateWithMockSetup( + core.ResourceType_TASK, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(namedEntity, nil) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). 
+ Return(nil, fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }, + ) +} + +func TestTaskMetadataUpdateRequiresTaskName(t *testing.T) { + s := testutils.Setup() + config := &NamedEntityConfig{} + + err := getUpdateTaskFunc(config)(s.Ctx, nil, s.CmdCtx) + + assert.ErrorContains(t, err, "task name wasn't passed") +} diff --git a/flytectl/cmd/update/testdata/invalid_attribute.yaml b/flytectl/cmd/update/testdata/invalid_attribute.yaml new file mode 100644 index 0000000000..1e7868c1e2 --- /dev/null +++ b/flytectl/cmd/update/testdata/invalid_attribute.yaml @@ -0,0 +1,5 @@ +InvalidDomain: development +InvalidProject: flytesnacks +InvalidWorkflow: "" +cpu: "1" +memory: 150Mi \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_cluster_attribute.yaml b/flytectl/cmd/update/testdata/valid_project_cluster_attribute.yaml new file mode 100644 index 0000000000..27dc7e2f3c --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_cluster_attribute.yaml @@ -0,0 +1,4 @@ +project: flytesnacks +attributes: + "foo": "bar" + "buzz": "lightyear" \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_domain_cluster_attribute.yaml b/flytectl/cmd/update/testdata/valid_project_domain_cluster_attribute.yaml new file mode 100644 index 0000000000..586fe522f3 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_domain_cluster_attribute.yaml @@ -0,0 +1,5 @@ +domain: development +project: flytesnacks +attributes: + "foo": "bar" + "buzz": "lightyear" \ No newline at end of file diff --git 
a/flytectl/cmd/update/testdata/valid_project_domain_execution_cluster_label.yaml b/flytectl/cmd/update/testdata/valid_project_domain_execution_cluster_label.yaml new file mode 100644 index 0000000000..afade68509 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_domain_execution_cluster_label.yaml @@ -0,0 +1,3 @@ +domain: development +project: flytesnacks +value: foo \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_domain_execution_queue_attribute.yaml b/flytectl/cmd/update/testdata/valid_project_domain_execution_queue_attribute.yaml new file mode 100644 index 0000000000..1620c65762 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_domain_execution_queue_attribute.yaml @@ -0,0 +1,7 @@ +domain: development +project: flytesnacks +tags: + - foo + - bar + - buzz + - lightyear \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_domain_plugin_override.yaml b/flytectl/cmd/update/testdata/valid_project_domain_plugin_override.yaml new file mode 100644 index 0000000000..9749e17100 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_domain_plugin_override.yaml @@ -0,0 +1,8 @@ +domain: development +project: flytesnacks +overrides: + - task_type: python_task + plugin_id: + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # 0 : FAIL , 1: DEFAULT diff --git a/flytectl/cmd/update/testdata/valid_project_domain_task_attribute.yaml b/flytectl/cmd/update/testdata/valid_project_domain_task_attribute.yaml new file mode 100644 index 0000000000..cd1a5c9abc --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_domain_task_attribute.yaml @@ -0,0 +1,8 @@ +domain: development +project: flytesnacks +defaults: + cpu: "1" + memory: 150Mi +limits: + cpu: "2" + memory: 450Mi \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_domain_workflow_execution_config.yaml 
b/flytectl/cmd/update/testdata/valid_project_domain_workflow_execution_config.yaml new file mode 100644 index 0000000000..84b87197a1 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_domain_workflow_execution_config.yaml @@ -0,0 +1,3 @@ +domain: development +project: flytesnacks +max_parallelism: 5 \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_execution_cluster_label.yaml b/flytectl/cmd/update/testdata/valid_project_execution_cluster_label.yaml new file mode 100644 index 0000000000..7d9e207ba7 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_execution_cluster_label.yaml @@ -0,0 +1,2 @@ +project: flytesnacks +value: foo \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_execution_queue_attribute.yaml b/flytectl/cmd/update/testdata/valid_project_execution_queue_attribute.yaml new file mode 100644 index 0000000000..7ddb5f135d --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_execution_queue_attribute.yaml @@ -0,0 +1,6 @@ +project: flytesnacks +tags: + - foo + - bar + - buzz + - lightyear \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_plugin_override.yaml b/flytectl/cmd/update/testdata/valid_project_plugin_override.yaml new file mode 100644 index 0000000000..1ad8e5cd01 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_plugin_override.yaml @@ -0,0 +1,7 @@ +project: flytesnacks +overrides: + - task_type: python_task + plugin_id: + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # 0 : FAIL , 1: DEFAULT diff --git a/flytectl/cmd/update/testdata/valid_project_task_attribute.yaml b/flytectl/cmd/update/testdata/valid_project_task_attribute.yaml new file mode 100644 index 0000000000..77281d5a22 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_task_attribute.yaml @@ -0,0 +1,7 @@ +project: flytesnacks +defaults: + cpu: "1" + memory: 150Mi +limits: + cpu: "2" + memory: 450Mi \ No newline 
at end of file diff --git a/flytectl/cmd/update/testdata/valid_project_workflow_execution_config.yaml b/flytectl/cmd/update/testdata/valid_project_workflow_execution_config.yaml new file mode 100644 index 0000000000..414e3ecbb4 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_project_workflow_execution_config.yaml @@ -0,0 +1,2 @@ +project: flytesnacks +max_parallelism: 5 \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_workflow_cluster_attribute.yaml b/flytectl/cmd/update/testdata/valid_workflow_cluster_attribute.yaml new file mode 100644 index 0000000000..e4030e455d --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_workflow_cluster_attribute.yaml @@ -0,0 +1,6 @@ +Domain: development +Project: flytesnacks +Workflow: core.control_flow.merge_sort.merge_sort +attributes: + "foo": "bar" + "buzz": "lightyear" \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_workflow_execution_cluster_label.yaml b/flytectl/cmd/update/testdata/valid_workflow_execution_cluster_label.yaml new file mode 100644 index 0000000000..068cbe9926 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_workflow_execution_cluster_label.yaml @@ -0,0 +1,4 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +value: foo \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_workflow_execution_queue_attribute.yaml b/flytectl/cmd/update/testdata/valid_workflow_execution_queue_attribute.yaml new file mode 100644 index 0000000000..d8952b1a6c --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_workflow_execution_queue_attribute.yaml @@ -0,0 +1,8 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +tags: + - foo + - bar + - buzz + - lightyear \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_workflow_plugin_override.yaml b/flytectl/cmd/update/testdata/valid_workflow_plugin_override.yaml new file mode 100644 
index 0000000000..5b35e23e31 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_workflow_plugin_override.yaml @@ -0,0 +1,9 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +overrides: + - task_type: python_task + plugin_id: + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # 0 : FAIL , 1: DEFAULT \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_workflow_task_attribute.yaml b/flytectl/cmd/update/testdata/valid_workflow_task_attribute.yaml new file mode 100644 index 0000000000..7c22207689 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_workflow_task_attribute.yaml @@ -0,0 +1,9 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +defaults: + cpu: "2" + memory: 250Mi +limits: + cpu: "3" + memory: 350Mi \ No newline at end of file diff --git a/flytectl/cmd/update/testdata/valid_workflow_workflow_execution_config.yaml b/flytectl/cmd/update/testdata/valid_workflow_workflow_execution_config.yaml new file mode 100644 index 0000000000..e4f6ec0049 --- /dev/null +++ b/flytectl/cmd/update/testdata/valid_workflow_workflow_execution_config.yaml @@ -0,0 +1,4 @@ +domain: development +project: flytesnacks +workflow: core.control_flow.merge_sort.merge_sort +max_parallelism: 5 \ No newline at end of file diff --git a/flytectl/cmd/update/update.go b/flytectl/cmd/update/update.go new file mode 100644 index 0000000000..9677ee897e --- /dev/null +++ b/flytectl/cmd/update/update.go @@ -0,0 +1,66 @@ +package update + +import ( + "github.com/flyteorg/flytectl/cmd/config/subcommand/clusterresourceattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/execution" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionclusterlabel" + "github.com/flyteorg/flytectl/cmd/config/subcommand/executionqueueattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/launchplan" + pluginoverride 
"github.com/flyteorg/flytectl/cmd/config/subcommand/plugin_override" + "github.com/flyteorg/flytectl/cmd/config/subcommand/project" + "github.com/flyteorg/flytectl/cmd/config/subcommand/taskresourceattribute" + "github.com/flyteorg/flytectl/cmd/config/subcommand/workflowexecutionconfig" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using sphinx. +const ( + updateUse = "update" + updateShort = `Update Flyte resources e.g., project.` + updatecmdLong = ` +Provides subcommands to update Flyte resources, such as tasks, workflows, launch plans, executions, and projects. +Update Flyte resource; e.g., to activate a project: +:: + + flytectl update project -p flytesnacks --activate +` +) + +// CreateUpdateCommand will return update command +func CreateUpdateCommand() *cobra.Command { + updateCmd := &cobra.Command{ + Use: updateUse, + Short: updateShort, + Long: updatecmdLong, + } + updateResourcesFuncs := map[string]cmdCore.CommandEntry{ + "launchplan": {CmdFunc: updateLPFunc, Aliases: []string{}, ProjectDomainNotRequired: false, PFlagProvider: launchplan.UConfig, + Short: updateLPShort, Long: updateLPLong}, + "launchplan-meta": {CmdFunc: getUpdateLPMetaFunc(namedEntityConfig), Aliases: []string{}, ProjectDomainNotRequired: false, PFlagProvider: namedEntityConfig, + Short: updateLPMetaShort, Long: updateLPMetaLong}, + "project": {CmdFunc: updateProjectsFunc, Aliases: []string{}, ProjectDomainNotRequired: true, PFlagProvider: project.DefaultProjectConfig, + Short: projectShort, Long: projectLong}, + "execution": {CmdFunc: updateExecutionFunc, Aliases: []string{}, ProjectDomainNotRequired: false, PFlagProvider: execution.UConfig, + Short: updateExecutionShort, Long: updateExecutionLong}, + "task-meta": {CmdFunc: getUpdateTaskFunc(namedEntityConfig), Aliases: []string{}, ProjectDomainNotRequired: false, PFlagProvider: namedEntityConfig, + Short: updateTaskShort, Long: 
updateTaskLong}, + "workflow-meta": {CmdFunc: getUpdateWorkflowFunc(namedEntityConfig), Aliases: []string{}, ProjectDomainNotRequired: false, PFlagProvider: namedEntityConfig, + Short: updateWorkflowShort, Long: updateWorkflowLong}, + "task-resource-attribute": {CmdFunc: updateTaskResourceAttributesFunc, Aliases: []string{}, PFlagProvider: taskresourceattribute.DefaultUpdateConfig, + Short: taskResourceAttributesShort, Long: taskResourceAttributesLong, ProjectDomainNotRequired: true}, + "cluster-resource-attribute": {CmdFunc: updateClusterResourceAttributesFunc, Aliases: []string{}, PFlagProvider: clusterresourceattribute.DefaultUpdateConfig, + Short: clusterResourceAttributesShort, Long: clusterResourceAttributesLong, ProjectDomainNotRequired: true}, + "execution-queue-attribute": {CmdFunc: updateExecutionQueueAttributesFunc, Aliases: []string{}, PFlagProvider: executionqueueattribute.DefaultUpdateConfig, + Short: executionQueueAttributesShort, Long: executionQueueAttributesLong, ProjectDomainNotRequired: true}, + "execution-cluster-label": {CmdFunc: updateExecutionClusterLabelFunc, Aliases: []string{}, PFlagProvider: executionclusterlabel.DefaultUpdateConfig, + Short: executionClusterLabelShort, Long: executionClusterLabelLong, ProjectDomainNotRequired: true}, + "plugin-override": {CmdFunc: updatePluginOverridesFunc, Aliases: []string{}, PFlagProvider: pluginoverride.DefaultUpdateConfig, + Short: pluginOverrideShort, Long: pluginOverrideLong, ProjectDomainNotRequired: true}, + "workflow-execution-config": {CmdFunc: updateWorkflowExecutionConfigFunc, Aliases: []string{}, PFlagProvider: workflowexecutionconfig.DefaultUpdateConfig, + Short: workflowExecutionConfigShort, Long: workflowExecutionConfigLong, ProjectDomainNotRequired: true}, + } + cmdCore.AddCommands(updateCmd, updateResourcesFuncs) + return updateCmd +} diff --git a/flytectl/cmd/update/update_test.go b/flytectl/cmd/update/update_test.go new file mode 100644 index 0000000000..23ec7d3495 --- /dev/null +++ 
b/flytectl/cmd/update/update_test.go @@ -0,0 +1,39 @@ +package update + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + testDataNonExistentFile = "testdata/non-existent-file" + testDataInvalidAttrFile = "testdata/invalid_attribute.yaml" +) + +func TestUpdateCommand(t *testing.T) { + updateCommand := CreateUpdateCommand() + assert.Equal(t, updateCommand.Use, updateUse) + assert.Equal(t, updateCommand.Short, updateShort) + assert.Equal(t, updateCommand.Long, updatecmdLong) + assert.Equal(t, len(updateCommand.Commands()), 12) + cmdNouns := updateCommand.Commands() + // Sort by Use value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + useArray := []string{"cluster-resource-attribute", "execution", "execution-cluster-label", "execution-queue-attribute", "launchplan", + "launchplan-meta", "plugin-override", "project", "task-meta", "task-resource-attribute", "workflow-execution-config", "workflow-meta"} + aliases := [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}} + shortArray := []string{clusterResourceAttributesShort, updateExecutionShort, executionClusterLabelShort, executionQueueAttributesShort, updateLPShort, updateLPMetaShort, + pluginOverrideShort, projectShort, updateTaskShort, taskResourceAttributesShort, workflowExecutionConfigShort, updateWorkflowShort} + longArray := []string{clusterResourceAttributesLong, updateExecutionLong, executionClusterLabelLong, executionQueueAttributesLong, updateLPLong, updateLPMetaLong, + pluginOverrideLong, projectLong, updateTaskLong, taskResourceAttributesLong, workflowExecutionConfigLong, updateWorkflowLong} + for i := range cmdNouns { + assert.Equal(t, cmdNouns[i].Use, useArray[i]) + assert.Equal(t, cmdNouns[i].Aliases, aliases[i]) + assert.Equal(t, cmdNouns[i].Short, shortArray[i]) + assert.Equal(t, cmdNouns[i].Long, longArray[i]) + } +} diff --git a/flytectl/cmd/update/workflow_meta.go b/flytectl/cmd/update/workflow_meta.go new 
file mode 100644 index 0000000000..d2c94d7e3e --- /dev/null +++ b/flytectl/cmd/update/workflow_meta.go @@ -0,0 +1,51 @@ +package update + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config" + cmdCore "github.com/flyteorg/flytectl/cmd/core" +) + +const ( + updateWorkflowShort = "Update workflow metadata" + updateWorkflowLong = ` +Update the description on the workflow: +:: + + flytectl update workflow-meta -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --description "Mergesort workflow example" + +Archiving workflow named entity would cause this to disappear from flyteconsole UI: +:: + + flytectl update workflow-meta -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --archive + +Activate workflow named entity: +:: + + flytectl update workflow-meta -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --activate + +Usage +` +) + +func getUpdateWorkflowFunc(namedEntityConfig *NamedEntityConfig) func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + return func(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + project := config.GetConfig().Project + domain := config.GetConfig().Domain + if len(args) != 1 { + return fmt.Errorf(clierrors.ErrWorkflowNotPassed) + } + name := args[0] + err := namedEntityConfig.UpdateNamedEntity(ctx, name, project, domain, core.ResourceType_WORKFLOW, cmdCtx) + if err != nil { + fmt.Printf(clierrors.ErrFailedWorkflowUpdate, name, err) + return err + } + fmt.Printf("updated metadata successfully on %v", name) + return nil + } +} diff --git a/flytectl/cmd/update/workflow_meta_test.go b/flytectl/cmd/update/workflow_meta_test.go new file mode 100644 index 0000000000..cfdc628751 --- /dev/null +++ b/flytectl/cmd/update/workflow_meta_test.go @@ -0,0 +1,196 @@ +package update + +import ( + "fmt" + "testing" + + 
"github.com/google/go-cmp/cmp" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/cmd/testutils" + "github.com/flyteorg/flytectl/pkg/ext" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestWorkflowMetadataCanBeActivated(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateNamedEntity", s.Ctx, + mock.MatchedBy( + func(r *admin.NamedEntityUpdateRequest) bool { + return r.GetMetadata().GetState() == admin.NamedEntityState_NAMED_ENTITY_ACTIVE + })) + }) +} + +func TestWorkflowMetadataCanBeArchived(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + config.Archive = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertCalled( + t, "UpdateNamedEntity", s.Ctx, + mock.MatchedBy( + func(r *admin.NamedEntityUpdateRequest) bool { + return r.GetMetadata().GetState() == admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + })) + }) +} + +func TestWorkflowMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + config.Activate = true + config.Archive = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + 
assert.ErrorContains(t, err, "Specify either activate or archive") + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestWorkflowMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestWorkflowMetadataUpdateWithoutForceFlagFails(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = false + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.ErrorContains(t, err, "update aborted by user") + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestWorkflowMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) +} + +func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) { + t.Run("without --force", func(t *testing.T) { + 
testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + + config.Force = false + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) + }) + + t.Run("with --force", func(t *testing.T) { + testNamedEntityUpdate(core.ResourceType_WORKFLOW, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + + config.Force = true + config.DryRun = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Nil(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }) + }) +} + +func TestWorkflowMetadataUpdateFailsWhenWorkflowDoesNotExist(t *testing.T) { + testNamedEntityUpdateWithMockSetup( + core.ResourceType_WORKFLOW, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(nil, ext.NewNotFoundError("named entity not found")) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). 
+ Return(&admin.NamedEntityUpdateResponse{}, nil) + }, + /* setup */ nil, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertNotCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }, + ) +} + +func TestWorkflowMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { + testNamedEntityUpdateWithMockSetup( + core.ResourceType_WORKFLOW, + /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { + s.MockAdminClient. + OnGetNamedEntityMatch( + s.Ctx, + mock.MatchedBy(func(r *admin.NamedEntityGetRequest) bool { + return r.ResourceType == namedEntity.ResourceType && + cmp.Equal(r.Id, namedEntity.Id) + })). + Return(namedEntity, nil) + s.MockAdminClient. + OnUpdateNamedEntityMatch(s.Ctx, mock.Anything). + Return(nil, fmt.Errorf("network error")) + }, + /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { + namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED + config.Activate = true + config.Force = true + }, + /* assert */ func(s *testutils.TestStruct, err error) { + assert.Error(t, err) + s.MockAdminClient.AssertCalled(t, "UpdateNamedEntity", mock.Anything, mock.Anything) + }, + ) +} + +func TestWorkflowMetadataUpdateRequiresWorkflowName(t *testing.T) { + s := testutils.Setup() + config := &NamedEntityConfig{} + + err := getUpdateWorkflowFunc(config)(s.Ctx, nil, s.CmdCtx) + + assert.ErrorContains(t, err, "workflow name wasn't passed") +} diff --git a/flytectl/cmd/upgrade/upgrade.go b/flytectl/cmd/upgrade/upgrade.go new file mode 100644 index 0000000000..b01a795a3a --- /dev/null +++ b/flytectl/cmd/upgrade/upgrade.go @@ -0,0 +1,148 @@ +package upgrade + +import ( + "context" + "errors" + "fmt" + "os" + "runtime" + "strings" + + "github.com/flyteorg/flytectl/pkg/util" + + stdlibversion "github.com/flyteorg/flyte/flytestdlib/version" + + "github.com/flyteorg/flytectl/pkg/github" + + 
"github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/mouuff/go-rocket-update/pkg/updater" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/flyteorg/flytectl/pkg/platformutil" + "github.com/spf13/cobra" +) + +type Goos string + +// Long descriptions are whitespace sensitive when generating docs using sphinx. +const ( + upgradeCmdShort = `Upgrades/rollbacks to a Flyte version.` + upgradeCmdLong = ` +For Flytectl, it is: +:: + + flytectl upgrade + +.. note:: + Please upgrade with sudo. Failing to do so may result in a permission issues. + +Rollback Flytectl binary: +:: + + flytectl upgrade rollback + +.. note:: + Upgrade is not available on Windows. +` + rollBackSubCommand = "rollback" +) + +var ( + goos = platformutil.Platform(runtime.GOOS) +) + +// SelfUpgrade will return self upgrade command +func SelfUpgrade(rootCmd *cobra.Command) map[string]cmdCore.CommandEntry { + getResourcesFuncs := map[string]cmdCore.CommandEntry{ + "upgrade": { + CmdFunc: selfUpgrade, + Aliases: []string{"upgrade"}, + ProjectDomainNotRequired: true, + Short: upgradeCmdShort, + Long: upgradeCmdLong, + DisableFlyteClient: true, + }, + } + return getResourcesFuncs +} + +func selfUpgrade(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + // Check if it's a rollback + if len(args) == 1 { + if args[0] == rollBackSubCommand && !isRollBackSupported(goos) { + return nil + } + ext, err := github.FlytectlReleaseConfig.GetExecutable() + if err != nil { + return err + } + backupBinary := fmt.Sprintf("%s.old", ext) + if _, err := os.Stat(backupBinary); err != nil { + return errors.New("flytectl backup doesn't exist. 
Rollback is not possible") + } + return github.FlytectlReleaseConfig.Rollback() + } + + if isSupported, err := isUpgradeSupported(goos); err != nil { + return err + } else if !isSupported { + return nil + } + + if message, err := upgrade(github.FlytectlReleaseConfig); err != nil { + return err + } else if len(message) > 0 { + logger.Info(ctx, message) + } + return nil +} + +func upgrade(u *updater.Updater) (string, error) { + updateStatus, err := u.Update() + if err != nil { + return "", err + } + + if updateStatus == updater.Updated { + latestVersion, err := u.GetLatestVersion() + if err != nil { + return "", err + } + return fmt.Sprintf("Successfully updated to version %s", latestVersion), nil + } + return "", nil +} + +func isUpgradeSupported(goos platformutil.Platform) (bool, error) { + latest, err := github.FlytectlReleaseConfig.GetLatestVersion() + if err != nil { + return false, err + } + + if isGreater, err := util.IsVersionGreaterThan(latest, stdlibversion.Version); err != nil { + return false, err + } else if !isGreater { + fmt.Println("You already have the latest version of Flytectl") + return false, nil + } + + message, err := github.GetUpgradeMessage(latest, goos) + if err != nil { + return false, err + } + if goos.String() == platformutil.Windows.String() || strings.Contains(message, "brew") { + if len(message) > 0 { + fmt.Println(message) + } + return false, nil + } + return true, nil +} + +func isRollBackSupported(goos platformutil.Platform) bool { + if goos.String() == platformutil.Windows.String() { + fmt.Printf("Flytectl rollback is not available on %s \n", goos.String()) + return false + } + return true +} diff --git a/flytectl/cmd/upgrade/upgrade_test.go b/flytectl/cmd/upgrade/upgrade_test.go new file mode 100644 index 0000000000..852d93dd1d --- /dev/null +++ b/flytectl/cmd/upgrade/upgrade_test.go @@ -0,0 +1,177 @@ +package upgrade + +import ( + "sort" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + 
"github.com/flyteorg/flytectl/pkg/github" + "github.com/flyteorg/flytectl/pkg/util" + + "github.com/flyteorg/flytectl/pkg/platformutil" + + stdlibversion "github.com/flyteorg/flyte/flytestdlib/version" + + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" +) + +var ( + version = "v0.2.20" + tempExt = "flyte.ext" +) + +func TestUpgradeCommand(t *testing.T) { + rootCmd := &cobra.Command{ + Long: "Flytectl is a CLI tool written in Go to interact with the FlyteAdmin service.", + Short: "Flytectl CLI tool", + Use: "flytectl", + DisableAutoGenTag: true, + } + upgradeCmd := SelfUpgrade(rootCmd) + cmdCore.AddCommands(rootCmd, upgradeCmd) + assert.Equal(t, len(rootCmd.Commands()), 1) + cmdNouns := rootCmd.Commands() + // Sort by Use value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + + assert.Equal(t, cmdNouns[0].Use, "upgrade") + assert.Equal(t, cmdNouns[0].Short, upgradeCmdShort) + assert.Equal(t, cmdNouns[0].Long, upgradeCmdLong) +} + +func TestUpgrade(t *testing.T) { + _ = util.WriteIntoFile([]byte("data"), tempExt) + stdlibversion.Version = version + github.FlytectlReleaseConfig.OverrideExecutable = tempExt + t.Run("Successful upgrade", func(t *testing.T) { + message, err := upgrade(github.FlytectlReleaseConfig) + assert.Nil(t, err) + assert.Contains(t, message, "Successfully updated to version") + }) +} + +func TestCheckGoosForRollback(t *testing.T) { + stdlibversion.Version = version + linux := platformutil.Linux + windows := platformutil.Windows + darwin := platformutil.Darwin + github.FlytectlReleaseConfig.OverrideExecutable = tempExt + t.Run("checkGOOSForRollback on linux", func(t *testing.T) { + assert.Equal(t, true, isRollBackSupported(linux)) + assert.Equal(t, false, isRollBackSupported(windows)) + assert.Equal(t, true, isRollBackSupported(darwin)) + }) +} + +func TestIsUpgradeable(t *testing.T) { + stdlibversion.Version = version + 
github.FlytectlReleaseConfig.OverrideExecutable = tempExt + linux := platformutil.Linux + windows := platformutil.Windows + darwin := platformutil.Darwin + t.Run("IsUpgradeable on linux", func(t *testing.T) { + check, err := isUpgradeSupported(linux) + assert.Nil(t, err) + assert.Equal(t, true, check) + }) + t.Run("IsUpgradeable on darwin", func(t *testing.T) { + check, err := isUpgradeSupported(darwin) + assert.Nil(t, err) + assert.Equal(t, true, check) + }) + t.Run("IsUpgradeable on darwin using brew", func(t *testing.T) { + check, err := isUpgradeSupported(darwin) + assert.Nil(t, err) + assert.Equal(t, true, check) + }) + t.Run("isUpgradeSupported failed", func(t *testing.T) { + stdlibversion.Version = "v" + check, err := isUpgradeSupported(linux) + assert.NotNil(t, err) + assert.Equal(t, false, check) + stdlibversion.Version = version + }) + t.Run("isUpgradeSupported windows", func(t *testing.T) { + check, err := isUpgradeSupported(windows) + assert.Nil(t, err) + assert.Equal(t, false, check) + }) +} + +func TestSelfUpgrade(t *testing.T) { + stdlibversion.Version = version + github.FlytectlReleaseConfig.OverrideExecutable = tempExt + goos = platformutil.Linux + t.Run("Successful upgrade", func(t *testing.T) { + s := testutils.Setup() + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = version + + assert.Nil(t, selfUpgrade(s.Ctx, []string{}, s.CmdCtx)) + }) +} + +func TestSelfUpgradeError(t *testing.T) { + stdlibversion.Version = version + github.FlytectlReleaseConfig.OverrideExecutable = tempExt + goos = platformutil.Linux + t.Run("Successful upgrade", func(t *testing.T) { + s := testutils.Setup() + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = "v" + + assert.NotNil(t, selfUpgrade(s.Ctx, []string{}, s.CmdCtx)) + }) + +} + +func TestSelfUpgradeRollback(t *testing.T) { + stdlibversion.Version = version + github.FlytectlReleaseConfig.OverrideExecutable = tempExt + goos = platformutil.Linux + 
t.Run("Successful rollback", func(t *testing.T) { + s := testutils.Setup() + var args = []string{rollBackSubCommand} + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = version + assert.Nil(t, selfUpgrade(s.Ctx, args, s.CmdCtx)) + }) + + t.Run("Successful rollback failed", func(t *testing.T) { + s := testutils.Setup() + var args = []string{rollBackSubCommand} + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = "v100.0.0" + assert.NotNil(t, selfUpgrade(s.Ctx, args, s.CmdCtx)) + }) + + t.Run("Successful rollback for windows", func(t *testing.T) { + s := testutils.Setup() + var args = []string{rollBackSubCommand} + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = version + goos = platformutil.Windows + assert.Nil(t, selfUpgrade(s.Ctx, args, s.CmdCtx)) + }) + + t.Run("Successful rollback for windows", func(t *testing.T) { + s := testutils.Setup() + var args = []string{rollBackSubCommand} + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = version + github.FlytectlReleaseConfig.OverrideExecutable = "/" + assert.Nil(t, selfUpgrade(s.Ctx, args, s.CmdCtx)) + }) + +} diff --git a/flytectl/cmd/version/version.go b/flytectl/cmd/version/version.go new file mode 100644 index 0000000000..dadbde6407 --- /dev/null +++ b/flytectl/cmd/version/version.go @@ -0,0 +1,116 @@ +package version + +import ( + "context" + "encoding/json" + "fmt" + "runtime" + + "github.com/flyteorg/flytectl/pkg/github" + + "github.com/flyteorg/flytectl/pkg/platformutil" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flytestdlib/logger" + stdlibversion "github.com/flyteorg/flyte/flytestdlib/version" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/spf13/cobra" +) + +// Long descriptions are whitespace sensitive when generating docs using sphinx. 
+const ( + versionCmdShort = `Fetches Flyte version` + versionCmdLong = ` +Fetch Flytectl version. +:: + + flytectl version +` + flytectlAppName = "flytectl" + controlPlanAppName = "controlPlane" +) + +type versionOutput struct { + // Specifies the Name of app + App string `json:"App,omitempty"` + // Specifies the git revision SHA1 of the build + Build string `json:"Build,omitempty"` + // Version for the build, should follow a semver + Version string `json:"Version,omitempty"` + // Build timestamp + BuildTime string `json:"BuildTime,omitempty"` +} + +// GetVersionCommand will return version command +func GetVersionCommand(rootCmd *cobra.Command) map[string]cmdCore.CommandEntry { + getResourcesFuncs := map[string]cmdCore.CommandEntry{ + "version": {CmdFunc: getVersion, Aliases: []string{"versions"}, ProjectDomainNotRequired: true, + Short: versionCmdShort, + Long: versionCmdLong}, + } + return getResourcesFuncs +} + +func getVersion(ctx context.Context, args []string, cmdCtx cmdCore.CommandContext) error { + goos := platformutil.Platform(runtime.GOOS) + version, err := github.FlytectlReleaseConfig.GetLatestVersion() + if err != nil { + logger.Error(ctx, "Unable to get the latest version because %v", err) + } else { + message, err := github.GetUpgradeMessage(version, goos) + if err != nil { + logger.Error(ctx, "Unable to detect a new version because %v", err) + } + if len(message) > 0 { + fmt.Println(message) + } + } + + // Print Flytectl + if err := printVersion(versionOutput{ + Build: stdlibversion.Build, + BuildTime: stdlibversion.BuildTime, + Version: stdlibversion.Version, + App: flytectlAppName, + }); err != nil { + return err + } + + // Print Flyteadmin version if available + if err := getControlPlaneVersion(ctx, cmdCtx); err != nil { + logger.Debug(ctx, err) + } + return nil +} + +func printVersion(response versionOutput) error { + b, err := json.MarshalIndent(response, "", " ") + if err != nil { + return err + } + fmt.Print(string(b)) + return nil +} + +func 
getControlPlaneVersion(ctx context.Context, cmdCtx cmdCore.CommandContext) error { + if cmdCtx.ClientSet() == nil { + logger.Debug(ctx, "Ignore talking to admin if host is not configured") + return nil + } + + v, err := cmdCtx.AdminClient().GetVersion(ctx, &admin.GetVersionRequest{}) + if err != nil || v == nil { + logger.Debugf(ctx, "Failed to get version of control plane %v: \n", err) + return err + } + // Print FlyteAdmin + if err := printVersion(versionOutput{ + Build: v.ControlPlaneVersion.Build, + BuildTime: v.ControlPlaneVersion.BuildTime, + Version: v.ControlPlaneVersion.Version, + App: controlPlanAppName, + }); err != nil { + return fmt.Errorf("Unable to get the control plane version. Please try again: %v", err) + } + return nil +} diff --git a/flytectl/cmd/version/version_test.go b/flytectl/cmd/version/version_test.go new file mode 100644 index 0000000000..f694089b9a --- /dev/null +++ b/flytectl/cmd/version/version_test.go @@ -0,0 +1,124 @@ +package version + +import ( + "context" + "errors" + "fmt" + "io" + "sort" + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + admin2 "github.com/flyteorg/flyte/flyteidl/clients/go/admin" + + "github.com/spf13/cobra" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + stdlibversion "github.com/flyteorg/flyte/flytestdlib/version" + cmdCore "github.com/flyteorg/flytectl/cmd/core" + "github.com/stretchr/testify/assert" +) + +var ( + versionRequest = &admin.GetVersionRequest{} + testVersion = "v0.1.20" + versionResponse = &admin.GetVersionResponse{ + ControlPlaneVersion: &admin.Version{ + Build: "", + BuildTime: "", + Version: testVersion, + }, + } +) + +func TestVersionCommand(t *testing.T) { + rootCmd := &cobra.Command{ + Long: "Flytectl is a CLI tool written in Go to interact with the FlyteAdmin service.", + Short: "Flytectl CLI tool", + Use: "flytectl", + DisableAutoGenTag: true, + } + versionCommand := 
GetVersionCommand(rootCmd) + cmdCore.AddCommands(rootCmd, versionCommand) + fmt.Println(rootCmd.Commands()) + assert.Equal(t, len(rootCmd.Commands()), 1) + cmdNouns := rootCmd.Commands() + // Sort by Use value. + sort.Slice(cmdNouns, func(i, j int) bool { + return cmdNouns[i].Use < cmdNouns[j].Use + }) + + assert.Equal(t, cmdNouns[0].Use, "version") + assert.Equal(t, cmdNouns[0].Short, versionCmdShort) + assert.Equal(t, cmdNouns[0].Long, versionCmdLong) +} + +func TestVersionCommandFunc(t *testing.T) { + ctx := context.Background() + s := testutils.Setup() + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = testVersion + s.MockClient.AdminClient().(*mocks.AdminServiceClient).OnGetVersionMatch(ctx, versionRequest).Return(versionResponse, nil) + err := getVersion(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.MockClient.AdminClient().(*mocks.AdminServiceClient).AssertCalled(t, "GetVersion", ctx, versionRequest) +} + +func TestVersionCommandFuncError(t *testing.T) { + ctx := context.Background() + s := testutils.Setup() + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = "v" + s.MockClient.AdminClient().(*mocks.AdminServiceClient).OnGetVersionMatch(ctx, versionRequest).Return(versionResponse, nil) + err := getVersion(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.MockClient.AdminClient().(*mocks.AdminServiceClient).AssertCalled(t, "GetVersion", ctx, versionRequest) +} + +func TestVersionCommandFuncErr(t *testing.T) { + ctx := context.Background() + s := testutils.Setup() + stdlibversion.Build = "" + stdlibversion.BuildTime = "" + stdlibversion.Version = testVersion + s.MockAdminClient.OnGetVersionMatch(ctx, versionRequest).Return(versionResponse, errors.New("error")) + err := getVersion(s.Ctx, []string{}, s.CmdCtx) + assert.Nil(t, err) + s.MockAdminClient.AssertCalled(t, "GetVersion", ctx, versionRequest) +} + +func TestVersionUtilFunc(t *testing.T) { + stdlibversion.Build = "" + 
stdlibversion.BuildTime = "" + stdlibversion.Version = testVersion + t.Run("Error in getting control plan version", func(t *testing.T) { + ctx := context.Background() + mockClient := admin2.InitializeMockClientset() + adminClient := mockClient.AdminClient().(*mocks.AdminServiceClient) + mockOutStream := new(io.Writer) + cmdCtx := cmdCore.NewCommandContext(mockClient, *mockOutStream) + adminClient.OnGetVersionMatch(ctx, &admin.GetVersionRequest{}).Return(nil, fmt.Errorf("error")) + err := getControlPlaneVersion(ctx, cmdCtx) + assert.NotNil(t, err) + }) + t.Run("Failed in getting version", func(t *testing.T) { + ctx := context.Background() + mockClient := admin2.InitializeMockClientset() + adminClient := mockClient.AdminClient().(*mocks.AdminServiceClient) + mockOutStream := new(io.Writer) + cmdCtx := cmdCore.NewCommandContext(mockClient, *mockOutStream) + adminClient.OnGetVersionMatch(ctx, &admin.GetVersionRequest{}).Return(nil, fmt.Errorf("error")) + err := getVersion(ctx, []string{}, cmdCtx) + assert.Nil(t, err) + }) + t.Run("ClientSet is empty", func(t *testing.T) { + ctx := context.Background() + cmdCtx := cmdCore.CommandContext{} + err := getVersion(ctx, []string{}, cmdCtx) + assert.Nil(t, err) + }) +} diff --git a/flytectl/config.yaml b/flytectl/config.yaml new file mode 100644 index 0000000000..af21453aeb --- /dev/null +++ b/flytectl/config.yaml @@ -0,0 +1,10 @@ +admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///localhost:30080 + insecure: true + authType: Pkce +console: + endpoint: http://localhost:30080 +logger: + show-source: true + level: 0 diff --git a/flytectl/doc-requirements.in b/flytectl/doc-requirements.in new file mode 100644 index 0000000000..38b976f528 --- /dev/null +++ b/flytectl/doc-requirements.in @@ -0,0 +1,10 @@ +git+https://github.com/flyteorg/furo@main +sphinx +sphinx-prompt +sphinx-material +sphinx-code-include +sphinx-copybutton +sphinx_fontawesome +sphinxcontrib-youtube +sphinx-panels 
+sphinx-reredirects diff --git a/flytectl/doc-requirements.txt b/flytectl/doc-requirements.txt new file mode 100644 index 0000000000..8e5f0fda9f --- /dev/null +++ b/flytectl/doc-requirements.txt @@ -0,0 +1,112 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile doc-requirements.in +# +alabaster==0.7.13 + # via sphinx +babel==2.14.0 + # via sphinx +beautifulsoup4==4.12.2 + # via + # furo + # sphinx-code-include + # sphinx-material +certifi==2023.11.17 + # via requests +charset-normalizer==3.3.2 + # via requests +css-html-js-minify==2.5.5 + # via sphinx-material +docutils==0.17.1 + # via + # sphinx + # sphinx-panels +furo @ git+https://github.com/flyteorg/furo@main + # via -r doc-requirements.in +idna==3.6 + # via requests +imagesize==1.4.1 + # via sphinx +importlib-metadata==7.0.1 + # via sphinx +jinja2==3.1.3 + # via sphinx +lxml==5.1.0 + # via sphinx-material +markupsafe==2.1.3 + # via jinja2 +packaging==23.2 + # via sphinx +pygments==2.17.2 + # via + # furo + # sphinx + # sphinx-prompt +python-slugify[unidecode]==8.0.1 + # via sphinx-material +pytz==2023.3.post1 + # via babel +requests==2.31.0 + # via + # sphinx + # sphinxcontrib-youtube +six==1.16.0 + # via sphinx-code-include +snowballstemmer==2.2.0 + # via sphinx +soupsieve==2.5 + # via beautifulsoup4 +sphinx==4.5.0 + # via + # -r doc-requirements.in + # furo + # sphinx-basic-ng + # sphinx-code-include + # sphinx-copybutton + # sphinx-fontawesome + # sphinx-material + # sphinx-panels + # sphinx-prompt + # sphinx-reredirects + # sphinxcontrib-youtube +sphinx-basic-ng==1.0.0b2 + # via furo +sphinx-code-include==1.1.1 + # via -r doc-requirements.in +sphinx-copybutton==0.5.2 + # via -r doc-requirements.in +sphinx-fontawesome==0.0.6 + # via -r doc-requirements.in +sphinx-material==0.0.36 + # via -r doc-requirements.in +sphinx-panels==0.6.0 + # via -r doc-requirements.in +sphinx-prompt==1.5.0 + # via -r doc-requirements.in +sphinx-reredirects==0.1.3 + # 
via -r doc-requirements.in +sphinxcontrib-applehelp==1.0.4 + # via sphinx +sphinxcontrib-devhelp==1.0.2 + # via sphinx +sphinxcontrib-htmlhelp==2.0.1 + # via sphinx +sphinxcontrib-jsmath==1.0.1 + # via sphinx +sphinxcontrib-qthelp==1.0.3 + # via sphinx +sphinxcontrib-serializinghtml==1.1.5 + # via sphinx +sphinxcontrib-youtube==1.3.0 + # via -r doc-requirements.in +text-unidecode==1.3 + # via python-slugify +unidecode==1.3.8 + # via python-slugify +urllib3==2.1.0 + # via requests +zipp==3.17.0 + # via importlib-metadata +setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/flytectl/docs/Makefile b/flytectl/docs/Makefile new file mode 100644 index 0000000000..c43bf3516c --- /dev/null +++ b/flytectl/docs/Makefile @@ -0,0 +1,32 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = flytekit +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +gendocs: + go build -o ../bin/flytectl-docs $(SOURCEDIR)/generate_docs.go + rm -rf $(SOURCEDIR)/gen + mkdir gen + ../bin/flytectl-docs + mv gen $(SOURCEDIR) + make html + +clean: + rm -rf $(SOURCEDIR)/gen + rm -rf build/* + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/flytectl/docs/coverage.out b/flytectl/docs/coverage.out new file mode 100644 index 0000000000..5f02b11199 --- /dev/null +++ b/flytectl/docs/coverage.out @@ -0,0 +1 @@ +mode: set diff --git a/flytectl/docs/make.bat b/flytectl/docs/make.bat new file mode 100644 index 0000000000..47d656bb74 --- /dev/null +++ b/flytectl/docs/make.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build +set SPHINXPROJ=simpleble + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/flytectl/docs/source/_static/custom.css b/flytectl/docs/source/_static/custom.css new file mode 100644 index 0000000000..96f45d4066 --- /dev/null +++ b/flytectl/docs/source/_static/custom.css @@ -0,0 +1,16 @@ +html .tabbed-set > label { + color: var(--color-foreground-border); +} + +html .tabbed-set > input:checked + label { + border-color: var(--color-link); + color: var(--color-link); +} + +html .tabbed-set > label:hover { + color: var(--color-link); +} + +html .tabbed-content { + box-shadow: 0 -.0625rem var(--color-background-border),0 .0625rem var(--color-background-border); +} \ No newline at end of file diff --git a/flytectl/docs/source/_templates/sidebar/brand.html b/flytectl/docs/source/_templates/sidebar/brand.html new file mode 100644 index 0000000000..a170d6c6d1 --- /dev/null +++ b/flytectl/docs/source/_templates/sidebar/brand.html @@ -0,0 +1,18 @@ + diff --git a/flytectl/docs/source/cluster-resource-attribute.rst b/flytectl/docs/source/cluster-resource-attribute.rst new file mode 100644 index 0000000000..c4b89458c8 --- /dev/null +++ b/flytectl/docs/source/cluster-resource-attribute.rst @@ -0,0 +1,11 @@ +Cluster resource attribute +-------------------------- +It specifies the actions to be performed on the 'cluster-resource-attribute'. + +.. 
toctree:: + :maxdepth: 1 + :caption: Cluster resource attribute + + gen/flytectl_get_cluster-resource-attribute + gen/flytectl_delete_cluster-resource-attribute + gen/flytectl_update_cluster-resource-attribute diff --git a/flytectl/docs/source/conf.py b/flytectl/docs/source/conf.py new file mode 100644 index 0000000000..2f473ff5b5 --- /dev/null +++ b/flytectl/docs/source/conf.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/stable/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import re + + +# -- Project information ----------------------------------------------------- + +project = "Flytectl" +copyright = "2021, Flyte" +author = "Flyte" + +# The full version, including alpha/beta/rc tags +release = re.sub("^v", "", os.popen("git describe").read().strip()) +version = release + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + "sphinx.ext.autosummary", + "sphinx.ext.autosectionlabel", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "sphinx.ext.doctest", + "sphinx.ext.coverage", + "sphinx-prompt", + "sphinx_copybutton", + "sphinx_fontawesome", + "sphinxcontrib.youtube", + "sphinx_reredirects", + "sphinx_panels", +] + +# build the templated autosummary files +autosummary_generate = True + +# autosectionlabel throws warnings if section names are duplicated. +# The following tells autosectionlabel to not throw a warning for +# duplicated section names that are in different documents. +autosectionlabel_prefix_document = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ".rst" + +# The master toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = [ + u"_build", + "Thumbs.db", + ".DS_Store", + "docs_index.rst", + "overview.rst", +] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "tango" +pygments_dark_style = "native" + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +html_theme = "furo" +html_title = "Flyte" +html_logo = "flyte_circle_gradient_1_4x4.png" +html_favicon = "flyte_circle_gradient_1_4x4.png" + +announcement = """ +📢 This is the old documentation for Flyte. +Please visit the new documentation here. +""" + +html_theme_options = { + "light_css_variables": { + "color-brand-primary": "#4300c9", + "color-brand-content": "#4300c9", + "color-announcement-background": "#FEE7B8", + "color-announcement-text": "#535353", + }, + "dark_css_variables": { + "color-brand-primary": "#9D68E4", + "color-brand-content": "#9D68E4", + "color-announcement-background": "#493100", + }, + "announcement": announcement, +} + +html_context = { + "home_page": "https://docs.flyte.org", + # custom flyteorg furo theme options + "github_repo": "flytectl", + "github_username": "flyteorg", + "github_commit": "master", + "docs_path": "docs/source", # path to documentation source +} + +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]} + + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] +html_css_files = ["custom.css"] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = "flytectldoc" + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). 
+ # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, "flytectl.tex", "flytectl Documentation", "Flyte", "manual"), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "flytectl", "flytectl Documentation", [author], 1)] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "flytectl", + "flytectl Documentation", + author, + "flytectl", + "The one CLI for flyte.", + "Miscellaneous", + ), +] + +# -- Options for intersphinx ------------------------------------------------- +# intersphinx configuration +intersphinx_mapping = { + "flyteidl": ("https://docs.flyte.org/en/latest/reference_flyteidl.html", None), + "flyte": ("https://docs.flyte.org/en/latest", None), +} + +if int(os.environ.get("ENABLE_SPHINX_REDIRECTS", 0)): + # Redirects to the new docs site + redirects = { + "verbs.html": "https://docs.flyte.org/en/latest/flytectl/verbs.html", + "nouns.html": "https://docs.flyte.org/en/latest/flytectl/nouns.html", + "gen/*": "https://docs.flyte.org/en/latest/flytectl/$source.html", + "contribute.html": "https://docs.flyte.org/en/latest/flytectl/contribute.html", + } diff --git a/flytectl/docs/source/config.rst b/flytectl/docs/source/config.rst new file mode 100644 index 
0000000000..60dd45ea3c --- /dev/null +++ b/flytectl/docs/source/config.rst @@ -0,0 +1,12 @@ +Config +------ +It specifies the actions to be performed on the resource 'config'. + +.. toctree:: + :maxdepth: 1 + :caption: Config + + gen/flytectl_config_validate + gen/flytectl_config_init + gen/flytectl_config_docs + gen/flytectl_config_discover diff --git a/flytectl/docs/source/contribute.rst b/flytectl/docs/source/contribute.rst new file mode 100644 index 0000000000..f5bea5d584 --- /dev/null +++ b/flytectl/docs/source/contribute.rst @@ -0,0 +1,73 @@ +########################### +Contributing Guide +########################### + +First off, thank you for thinking about contributing! +Here are the instructions that will guide you through contributing, fixing, and improving Flytectl. + +📝 Contribute to Documentation +============================== + +Docs are generated using Sphinx and are available at `flytectl.rtfd.io <https://flytectl.rtfd.io>`__. + +To update the documentation, follow these steps: + +1. Install the requirements by running ``pip install -r doc-requirements.txt`` in the root folder. +2. Make modifications in the `cmd `__ folder. +3. Run ``make gendocs`` from within the `docs `__ folder. +4. Open html files produced by Sphinx in your browser to verify if the changes look as expected (html files can be found in the ``docs/build/html`` folder). + +💻 Contribute Code +================== + +1. Run ``make compile`` in the root directory to compile the code. +2. Set up a local cluster by running ``./bin/flytectl sandbox start`` in the root directory. +3. Run ``flytectl get project`` to see if things are working. +4. Run the command you want to test in the terminal. +5. If you want to update the command (add additional options, change existing options, etc.): + + * Navigate to `cmd `__ directory + * Each sub-directory points to a command, e.g., ``create`` points to ``flytectl create ...`` + * Here are the directories you can navigate to: + + .. 
list-table:: Flytectl cmd directories + :widths: 25 25 50 + :header-rows: 1 + + * - Directory + - Command + - Description + * - ``config`` + - ``flytectl config ...`` + - Common package for all commands; has root flags + * - ``configuration`` + - ``flytectl configuration ...`` + - Validates/generates Flytectl config + * - ``create`` + - ``flytectl create ...`` + - Creates a project/execution + * - ``delete`` + - ``flytectl delete ...`` + - Aborts an execution and deletes the resource attributes + * - ``get`` + - ``flytectl get ...`` + - Gets a task/workflow/launchplan/execution/project/resource attribute + * - ``register`` + - ``flytectl register ...`` + - Registers a task/workflow/launchplan + * - ``sandbox`` + - ``flytectl sandbox ...`` + - Interacts with sandbox + * - ``update`` + - ``flytectl update ...`` + - Updates a project/launchplan/resource attribute + * - ``upgrade`` + - ``flytectl upgrade ...`` + - Upgrades/rollbacks Flytectl version + * - ``version`` + - ``flytectl version ...`` + - Fetches Flytectl version + + Find all the Flytectl commands :ref:`here `. + * Run appropriate tests to view the changes by running ``go test ./... -race -coverprofile=coverage.txt -covermode=atomic -v`` in the root directory. + diff --git a/flytectl/docs/source/demo.rst b/flytectl/docs/source/demo.rst new file mode 100644 index 0000000000..ce9a3b00e1 --- /dev/null +++ b/flytectl/docs/source/demo.rst @@ -0,0 +1,13 @@ +Demo +------- +These are the actions which can be performed on the 'demo' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Demo + + gen/flytectl_demo_start + gen/flytectl_demo_status + gen/flytectl_demo_teardown + gen/flytectl_demo_exec + gen/flytectl_demo_reload diff --git a/flytectl/docs/source/docs_index.rst b/flytectl/docs/source/docs_index.rst new file mode 100644 index 0000000000..f57f41f530 --- /dev/null +++ b/flytectl/docs/source/docs_index.rst @@ -0,0 +1,12 @@ +********************** +FlyteCTL API Reference +********************** + +.. 
toctree:: + :maxdepth: 2 + + Overview + CLI Entrypoint + verbs + nouns + contribute diff --git a/flytectl/docs/source/examples.rst b/flytectl/docs/source/examples.rst new file mode 100644 index 0000000000..35993115b4 --- /dev/null +++ b/flytectl/docs/source/examples.rst @@ -0,0 +1,10 @@ +Examples +--------- +It specifies the actions to be performed on the 'examples' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Examples + + + gen/flytectl_register_examples diff --git a/flytectl/docs/source/execution-cluster-label.rst b/flytectl/docs/source/execution-cluster-label.rst new file mode 100644 index 0000000000..9daa792840 --- /dev/null +++ b/flytectl/docs/source/execution-cluster-label.rst @@ -0,0 +1,11 @@ +Execution cluster label +----------------------- +It specifies the actions to be performed on the 'execution-cluster-label' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Execution cluster label + + gen/flytectl_get_execution-cluster-label + gen/flytectl_update_execution-cluster-label + gen/flytectl_delete_execution-cluster-label diff --git a/flytectl/docs/source/execution-queue-attribute.rst b/flytectl/docs/source/execution-queue-attribute.rst new file mode 100644 index 0000000000..cbffa7537b --- /dev/null +++ b/flytectl/docs/source/execution-queue-attribute.rst @@ -0,0 +1,11 @@ +Execution queue attribute +-------------------------- +It specifies the actions to be performed on the 'execution-queue-attribute' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Execution queue attribute + + gen/flytectl_get_execution-queue-attribute + gen/flytectl_delete_execution-queue-attribute + gen/flytectl_update_execution-queue-attribute diff --git a/flytectl/docs/source/execution.rst b/flytectl/docs/source/execution.rst new file mode 100644 index 0000000000..6acfc7e3e8 --- /dev/null +++ b/flytectl/docs/source/execution.rst @@ -0,0 +1,12 @@ +Execution +--------- +It specifies the actions to be performed on the 'execution' resource. + +.. 
toctree:: + :maxdepth: 1 + :caption: Execution + + gen/flytectl_create_execution + gen/flytectl_get_execution + gen/flytectl_update_execution + gen/flytectl_delete_execution diff --git a/flytectl/docs/source/files.rst b/flytectl/docs/source/files.rst new file mode 100644 index 0000000000..f02fb03950 --- /dev/null +++ b/flytectl/docs/source/files.rst @@ -0,0 +1,11 @@ +Files +------ +It specifies the actions to be performed on the 'file' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Files + + gen/flytectl_register_files + +Note: It allows the user to register local files. diff --git a/flytectl/docs/source/flyte_circle_gradient_1_4x4.png b/flytectl/docs/source/flyte_circle_gradient_1_4x4.png new file mode 100644 index 0000000000..49cdbbbc34 Binary files /dev/null and b/flytectl/docs/source/flyte_circle_gradient_1_4x4.png differ diff --git a/flytectl/docs/source/gen/flytectl.rst b/flytectl/docs/source/gen/flytectl.rst new file mode 100644 index 0000000000..9109b234bd --- /dev/null +++ b/flytectl/docs/source/gen/flytectl.rst @@ -0,0 +1,99 @@ +.. _flytectl: + +flytectl +-------- + +Flytectl CLI tool + +Synopsis +~~~~~~~~ + + +Flytectl is a CLI tool written in Go to interact with the FlyteAdmin service. + +Options +~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + -h, --help help for flytectl + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. 
+ -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_compile` - Validate flyte packages without registration needed. +* :doc:`flytectl_completion` - Generates completion script. 
+* :doc:`flytectl_config` - Runs various config commands, look at the help of this command to get a list of available commands. +* :doc:`flytectl_create` - Creates various Flyte resources such as tasks, workflows, launch plans, executions, and projects. +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. +* :doc:`flytectl_demo` - Helps with demo interactions like start, teardown, status, and exec. +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. +* :doc:`flytectl_register` - Registers tasks, workflows, and launch plans from a list of generated serialized files. +* :doc:`flytectl_sandbox` - Helps with sandbox interactions like start, teardown, status, and exec. +* :doc:`flytectl_update` - Update Flyte resources e.g., project. +* :doc:`flytectl_upgrade` - Upgrades/rollbacks to a Flyte version. +* :doc:`flytectl_version` - Fetches Flyte version + diff --git a/flytectl/docs/source/gen/flytectl_compile.rst b/flytectl/docs/source/gen/flytectl_compile.rst new file mode 100644 index 0000000000..6ce685df5b --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_compile.rst @@ -0,0 +1,113 @@ +.. _flytectl_compile: + +flytectl compile +---------------- + +Validate flyte packages without registration needed. + +Synopsis +~~~~~~~~ + + + +Validate workflows by compiling flyte's serialized protobuf files (task, workflows and launch plans). This is useful for testing workflows and tasks without needing to talk with a flyte cluster. + +:: + + flytectl compile --file my-flyte-package.tgz + +:: + + flytectl compile --file /home/user/dags/my-flyte-package.tgz + +.. note:: + Input file is a path to a tgz. This file is generated by either pyflyte or jflyte. tgz file contains protobuf files describing workflows, tasks and launch plans. + + + +:: + + flytectl compile [flags] + +Options +~~~~~~~ + +:: + + --file string Path to a flyte package file. 
Flyte packages are tgz files generated by pyflyte or jflyte. + -h, --help help for compile + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool + diff --git a/flytectl/docs/source/gen/flytectl_completion.rst b/flytectl/docs/source/gen/flytectl_completion.rst new file mode 100644 index 0000000000..5671d3453b --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_completion.rst @@ -0,0 +1,160 @@ +.. _flytectl_completion: + +flytectl completion +------------------- + +Generates completion script. + +Synopsis +~~~~~~~~ + + +To load completion, run the following commands in accordance with the shell you are using: + +- Bash + :: + + $ source <(flytectl completion bash) + + To load completions for each session: + + - Linux + + :: + + $ flytectl completion bash > /etc/bash_completion.d/flytectl + + - macOS + + :: + + $ flytectl completion bash > /usr/local/etc/bash_completion.d/flytectl + +- Zsh + If shell completion is not already enabled in your environment, enable it: + + :: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + Once enabled, execute once: + + :: + + $ flytectl completion zsh > "${fpath[1]}/_flytectl" + + .. note:: + Start a new shell for this setup to take effect. 
+ +- fish + + :: + + $ flytectl completion fish | source + + To load completions for each session, run: + + :: + + $ flytectl completion fish > ~/.config/fish/completions/flytectl.fish + +- PowerShell + + :: + + PS> flytectl completion powershell | Out-String | Invoke-Expression + + To load completions for each session, run: + + :: + + PS> flytectl completion powershell > flytectl.ps1 + + and source this file from your PowerShell profile. + + +:: + + flytectl completion [bash|zsh|fish|powershell] + +Options +~~~~~~~ + +:: + + -h, --help help for completion + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool + diff --git a/flytectl/docs/source/gen/flytectl_config.rst b/flytectl/docs/source/gen/flytectl_config.rst new file mode 100644 index 0000000000..be0ef53651 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_config.rst @@ -0,0 +1,101 @@ +.. _flytectl_config: + +flytectl config +--------------- + +Runs various config commands, look at the help of this command to get a list of available commands.. + +Synopsis +~~~~~~~~ + + +Runs various config commands, look at the help of this command to get a list of available commands.. + +Options +~~~~~~~ + +:: + + --file stringArray Passes the config file to load. + If empty, it'll first search for the config file path then, if found, will load config from there. 
+ --force Force to overwrite the default config file without confirmation + -h, --help help for config + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_config_discover` - Searches for a config in one of the default search paths. +* :doc:`flytectl_config_docs` - Generate configuration documentation in rst format +* :doc:`flytectl_config_init` - Generates a Flytectl config file in the user's home directory. +* :doc:`flytectl_config_validate` - Validates the loaded config. + diff --git a/flytectl/docs/source/gen/flytectl_config_discover.rst b/flytectl/docs/source/gen/flytectl_config_discover.rst new file mode 100644 index 0000000000..6727e00da8 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_config_discover.rst @@ -0,0 +1,100 @@ +.. _flytectl_config_discover: + +flytectl config discover +------------------------ + +Searches for a config in one of the default search paths. + +Synopsis +~~~~~~~~ + + +Searches for a config in one of the default search paths. + +:: + + flytectl config discover [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for discover + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --file stringArray Passes the config file to load. + If empty, it'll first search for the config file path then, if found, will load config from there. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. 
+ --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. 
(default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_config` - Runs various config commands, look at the help of this command to get a list of available commands.. + diff --git a/flytectl/docs/source/gen/flytectl_config_docs.rst b/flytectl/docs/source/gen/flytectl_config_docs.rst new file mode 100644 index 0000000000..06d0969e83 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_config_docs.rst @@ -0,0 +1,100 @@ +.. _flytectl_config_docs: + +flytectl config docs +-------------------- + +Generate configuration documentation in rst format + +Synopsis +~~~~~~~~ + + +Generate configuration documentation in rst format + +:: + + flytectl config docs [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for docs + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --file stringArray Passes the config file to load. + If empty, it'll first search for the config file path then, if found, will load config from there. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. 
+ --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. 
(default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_config` - Runs various config commands, look at the help of this command to get a list of available commands.. + diff --git a/flytectl/docs/source/gen/flytectl_config_init.rst b/flytectl/docs/source/gen/flytectl_config_init.rst new file mode 100644 index 0000000000..ea2a964d67 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_config_init.rst @@ -0,0 +1,136 @@ +.. _flytectl_config_init: + +flytectl config init +-------------------- + +Generates a Flytectl config file in the user's home directory. + +Synopsis +~~~~~~~~ + + +Creates a Flytectl config file in Flyte directory i.e ~/.flyte. + +Generate Sandbox config: +:: + + flytectl config init + +Flyte Sandbox is a fully standalone minimal environment for running Flyte. +Read more about the Sandbox deployment :ref:`here `. + +Generate remote cluster config: +:: + + flytectl config init --host=flyte.myexample.com + +By default, the connection is secure. +Read more about remote deployment :ref:`here `. + +Generate remote cluster config with insecure connection: +:: + + flytectl config init --host=flyte.myexample.com --insecure + + Generate remote cluster config with separate console endpoint: + :: + + flytectl config init --host=flyte.myexample.com --console=console.myexample.com + +Generate Flytectl config with a storage provider: +:: + + flytectl config init --host=flyte.myexample.com --storage + + +:: + + flytectl config init [flags] + +Options +~~~~~~~ + +:: + + --console string Endpoint of console, if different than flyte admin + --force Force to overwrite the default config file without confirmation + -h, --help help for init + --host string Endpoint of flyte admin + --insecure Enable insecure mode + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --file stringArray Passes the config file to load. + If empty, it'll first search for the config file path then, if found, will load config from there. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. 
+ --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. 
(default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_config` - Runs various config commands, look at the help of this command to get a list of available commands.. + diff --git a/flytectl/docs/source/gen/flytectl_config_validate.rst b/flytectl/docs/source/gen/flytectl_config_validate.rst new file mode 100644 index 0000000000..41a5511b11 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_config_validate.rst @@ -0,0 +1,102 @@ +.. _flytectl_config_validate: + +flytectl config validate +------------------------ + +Validates the loaded config. + +Synopsis +~~~~~~~~ + + +Validates the loaded config. + +:: + + flytectl config validate [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for validate + --strict Validates that all keys in loaded config + map to already registered sections. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. 
It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. 
(default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --file stringArray Passes the config file to load. + If empty, it'll first search for the config file path then, if found, will load config from there. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. 
+ --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. 
Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_config` - Runs various config commands, look at the help of this command to get a list of available commands.. + diff --git a/flytectl/docs/source/gen/flytectl_create.rst b/flytectl/docs/source/gen/flytectl_create.rst new file mode 100644 index 0000000000..8827dc20e9 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_create.rst @@ -0,0 +1,101 @@ +.. _flytectl_create: + +flytectl create +--------------- + +Creates various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + +Synopsis +~~~~~~~~ + + + +Create Flyte resource; if a project: +:: + + flytectl create project --file project.yaml + + +Options +~~~~~~~ + +:: + + -h, --help help for create + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_create_execution` - Creates execution resources. +* :doc:`flytectl_create_project` - Creates project resources. 
+ diff --git a/flytectl/docs/source/gen/flytectl_create_execution.rst b/flytectl/docs/source/gen/flytectl_create_execution.rst new file mode 100644 index 0000000000..a51529dcbd --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_create_execution.rst @@ -0,0 +1,256 @@ +.. _flytectl_create_execution: + +flytectl create execution +------------------------- + +Creates execution resources. + +Synopsis +~~~~~~~~ + + + +Create execution resources for a given workflow or task in a project and domain. + +There are three steps to generate an execution, as outlined below: + +1. Generate the execution spec file using the :ref:`get task ` command. +:: + + flytectl get tasks -d development -p flytesnacks core.control_flow.merge_sort.merge --version v2 --execFile execution_spec.yaml + +The generated file would look similar to the following: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + sorted_list1: + - 0 + sorted_list2: + - 0 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: "v2" + +2. [Optional] Update the inputs for the execution, if needed. +The generated spec file can be modified to change the input values, as shown below: + +.. code-block:: yaml + + iamRoleARN: 'arn:aws:iam::12345678:role/defaultrole' + inputs: + sorted_list1: + - 2 + - 4 + - 6 + sorted_list2: + - 1 + - 3 + - 5 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: "v2" + +3. [Optional] Update the envs for the execution, if needed. +The generated spec file can be modified to change the envs values, as shown below: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + sorted_list1: + - 0 + sorted_list2: + - 0 + envs: + foo: bar + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: "v2" + +4. Run the execution by passing the generated YAML file. +The file can then be passed through the command line. 
+It is worth noting that the source's and target's project and domain can be different. +:: + + flytectl create execution --execFile execution_spec.yaml -p flytesnacks -d staging --targetProject flytesnacks + +5. To relaunch an execution, pass the current execution ID as follows: + +:: + + flytectl create execution --relaunch ffb31066a0f8b4d52b77 -p flytesnacks -d development + +6. To recover an execution, i.e., recreate it from the last known failure point for previously-run workflow execution, run: + +:: + + flytectl create execution --recover ffb31066a0f8b4d52b77 -p flytesnacks -d development + +See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details. + +7. You can create executions idempotently by naming them. This is also a way to *name* an execution for discovery. Note, +an execution id has to be unique within a project domain. So if the *name* matches an existing execution an already exists exception +will be raised. + +:: + + flytectl create execution --recover ffb31066a0f8b4d52b77 -p flytesnacks -d development custom_name + +8. Generic/Struct/Dataclass/JSON types are supported for execution in a similar manner. +The following is an example of how generic data can be specified while creating the execution. + +:: + + flytectl get task -d development -p flytesnacks core.type_system.custom_objects.add --execFile adddatanum.yaml + +The generated file would look similar to this. Here, empty values have been dumped for generic data types 'x' and 'y'. +:: + + iamRoleARN: "" + inputs: + "x": {} + "y": {} + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.type_system.custom_objects.add + version: v3 + +9. 
Modified file with struct data populated for 'x' and 'y' parameters for the task "core.type_system.custom_objects.add": + +:: + + iamRoleARN: "arn:aws:iam::123456789:role/dummy" + inputs: + "x": + "x": 2 + "y": ydatafory + "z": + 1 : "foo" + 2 : "bar" + "y": + "x": 3 + "y": ydataforx + "z": + 3 : "buzz" + 4 : "lightyear" + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.type_system.custom_objects.add + version: v3 + +10. If you have configured a plugin that implements github.com/flyteorg/flyteadmin/pkg/workflowengine/interfaces/WorkflowExecutor + that supports cluster pools, then when creating a new execution, you can assign it to a specific cluster pool: + +:: + + flytectl create execution --execFile execution_spec.yaml -p flytesnacks -d development --clusterPool my-gpu-cluster + + +:: + + flytectl create execution [flags] + +Options +~~~~~~~ + +:: + + --clusterPool string specify which cluster pool to assign execution to. + --dryRun execute command without making any modifications. + --execFile string file for the execution params. If not specified defaults to <_name>.execution_spec.yaml + -h, --help help for execution + --iamRoleARN string iam role ARN AuthRole for launching execution. + --kubeServiceAcct string kubernetes service account AuthRole for launching execution. + --overwriteCache skip cached results when performing execution, causing all outputs to be re-calculated and stored data to be overwritten. Does not work for recovered executions. + --recover string execution id to be recreated from the last known failure point. + --relaunch string execution id to be relaunched. + --targetDomain string domain where execution needs to be created. If not specified configured domain would be used. + --targetProject string project where execution needs to be created. If not specified configured project would be used. + --task string + --version string specify version of execution workflow/task. 
+ --workflow string + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_create` - Creates various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_create_project.rst b/flytectl/docs/source/gen/flytectl_create_project.rst new file mode 100644 index 0000000000..16dedc8af4 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_create_project.rst @@ -0,0 +1,136 @@ +.. _flytectl_create_project: + +flytectl create project +----------------------- + +Creates project resources. + +Synopsis +~~~~~~~~ + + + +Create a project given its name and id. + +:: + + flytectl create project --name flytesnacks --id flytesnacks --description "flytesnacks description" --labels app=flyte + +.. note:: + The terms project/projects are interchangeable in these commands. + +Create a project by definition file. + +:: + + flytectl create project --file project.yaml + +.. code-block:: yaml + + id: "project-unique-id" + name: "Name" + labels: + values: + app: flyte + description: "Some description for the project." + +.. note:: + The project name shouldn't contain any whitespace characters. + + +:: + + flytectl create project [flags] + +Options +~~~~~~~ + +:: + + --activate Activates the project specified as argument. Only used in update + --activateProject (Deprecated) Activates the project specified as argument. Only used in update + --archive Archives the project specified as argument. 
Only used in update + --archiveProject (Deprecated) Archives the project specified as argument. Only used in update + --description string description for the project specified as argument. + --dryRun execute command without making any modifications. + --file string file for the project definition. + --force Skips asking for an acknowledgement during an update operation. Only used in update + -h, --help help for project + --id string id for the project specified as argument. + --labels stringToString labels for the project specified as argument. (default []) + --name string name for the project specified as argument. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_create` - Creates various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_delete.rst b/flytectl/docs/source/gen/flytectl_delete.rst new file mode 100644 index 0000000000..485404ade9 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete.rst @@ -0,0 +1,106 @@ +.. _flytectl_delete: + +flytectl delete +--------------- + +Terminates/deletes various Flyte resources such as executions and resource attributes. + +Synopsis +~~~~~~~~ + + + +Delete a resource; if an execution: +:: + + flytectl delete execution kxd1i72850 -d development -p flytesnacks + + +Options +~~~~~~~ + +:: + + -h, --help help for delete + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_delete_cluster-resource-attribute` - Deletes matchable resources of cluster attributes. +* :doc:`flytectl_delete_execution` - Terminates/deletes execution resources. +* :doc:`flytectl_delete_execution-cluster-label` - Deletes matchable resources of execution cluster label. +* :doc:`flytectl_delete_execution-queue-attribute` - Deletes matchable resources of execution queue attributes. +* :doc:`flytectl_delete_plugin-override` - Deletes matchable resources of plugin overrides. +* :doc:`flytectl_delete_task-resource-attribute` - Deletes matchable resources of task attributes. +* :doc:`flytectl_delete_workflow-execution-config` - Deletes matchable resources of workflow execution config. + diff --git a/flytectl/docs/source/gen/flytectl_delete_cluster-resource-attribute.rst b/flytectl/docs/source/gen/flytectl_delete_cluster-resource-attribute.rst new file mode 100644 index 0000000000..5264b9046e --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_cluster-resource-attribute.rst @@ -0,0 +1,134 @@ +.. _flytectl_delete_cluster-resource-attribute: + +flytectl delete cluster-resource-attribute +------------------------------------------ + +Deletes matchable resources of cluster attributes. + +Synopsis +~~~~~~~~ + + + +Delete cluster resource attributes for the given project and domain, in combination with the workflow name. 
+ +For project flytesnacks and development domain, run: +:: + + flytectl delete cluster-resource-attribute -p flytesnacks -d development + + +To delete cluster resource attribute using the config file that was used to create it, run: + +:: + + flytectl delete cluster-resource-attribute --attrFile cra.yaml + +For example, here's the config file cra.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + attributes: + foo: "bar" + buzz: "lightyear" + +Attributes are optional in the file, which are unread during the 'delete' command but can be retained since the same file can be used for 'get', 'update' and 'delete' commands. + +To delete cluster resource attribute for the workflow 'core.control_flow.merge_sort.merge_sort', run: + +:: + + flytectl delete cluster-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage + + +:: + + flytectl delete cluster-resource-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for delete attribute for the resource type. + --dryRun execute command without making any modifications. + -h, --help help for cluster-resource-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. 
+ diff --git a/flytectl/docs/source/gen/flytectl_delete_execution-cluster-label.rst b/flytectl/docs/source/gen/flytectl_delete_execution-cluster-label.rst new file mode 100644 index 0000000000..3314aba82e --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_execution-cluster-label.rst @@ -0,0 +1,131 @@ +.. _flytectl_delete_execution-cluster-label: + +flytectl delete execution-cluster-label +--------------------------------------- + +Deletes matchable resources of execution cluster label. + +Synopsis +~~~~~~~~ + + + +Delete execution cluster label for a given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete execution-cluster-label -p flytesnacks -d development + +To delete execution cluster label using the config file that was used to create it, run: + +:: + + flytectl delete execution-cluster-label --attrFile ecl.yaml + +For example, here's the config file ecl.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + value: foo + +Value is optional in the file as it is unread during the delete command, but it can be retained since the same file can be used for 'get', 'update' and 'delete' commands. + +To delete the execution cluster label of the workflow 'core.control_flow.merge_sort.merge_sort', run the following: + +:: + + flytectl delete execution-cluster-label -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage + + +:: + + flytectl delete execution-cluster-label [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for delete attribute for the resource type. + --dryRun execute command without making any modifications. + -h, --help help for execution-cluster-label + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. + diff --git a/flytectl/docs/source/gen/flytectl_delete_execution-queue-attribute.rst b/flytectl/docs/source/gen/flytectl_delete_execution-queue-attribute.rst new file mode 100644 index 0000000000..57bd1c6d04 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_execution-queue-attribute.rst @@ -0,0 +1,135 @@ +.. _flytectl_delete_execution-queue-attribute: + +flytectl delete execution-queue-attribute +----------------------------------------- + +Deletes matchable resources of execution queue attributes. + +Synopsis +~~~~~~~~ + + + +Delete execution queue attributes for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete execution-queue-attribute -p flytesnacks -d development + +Delete execution queue attribute using the config file which was used to create it. + +:: + + flytectl delete execution-queue-attribute --attrFile era.yaml + +For example, here's the config file era.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + tags: + - foo + - bar + - buzz + - lightyear + +Value is optional in the file as it is unread during the delete command but it can be retained since the same file can be used for get, update and delete commands. 
+ +To delete the execution queue attribute for the workflow 'core.control_flow.merge_sort.merge_sort', run the following command: + +:: + + flytectl delete execution-queue-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage + + +:: + + flytectl delete execution-queue-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for delete attribute for the resource type. + --dryRun execute command without making any modifications. + -h, --help help for execution-queue-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. + diff --git a/flytectl/docs/source/gen/flytectl_delete_execution.rst b/flytectl/docs/source/gen/flytectl_delete_execution.rst new file mode 100644 index 0000000000..c336e66abf --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_execution.rst @@ -0,0 +1,142 @@ +.. _flytectl_delete_execution: + +flytectl delete execution +------------------------- + +Terminates/deletes execution resources. + +Synopsis +~~~~~~~~ + + + +Task executions can be aborted only if they are in non-terminal state. If they are FAILED, ABORTED, or SUCCEEDED, calling terminate on them has no effect. +Terminate a single execution with its name: + +:: + + flytectl delete execution c6a51x2l9e -d development -p flytesnacks + +.. note:: + The terms execution/executions are interchangeable in these commands. 
+ +Get an execution to check its state: + +:: + + flytectl get execution -d development -p flytesnacks + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | NAME (7) | WORKFLOW NAME | TYPE | PHASE | STARTED | ELAPSED TIME | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | c6a51x2l9e | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:13:04.680476300Z | 15.540361300s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + +Terminate multiple executions with their names: +:: + + flytectl delete execution eeam9s8sny p4wv4hwgc4 -d development -p flytesnacks + +Get an execution to find the state of previously terminated executions: + +:: + + flytectl get execution -d development -p flytesnacks + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | NAME (7) | WORKFLOW NAME | TYPE | PHASE | STARTED | ELAPSED TIME | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | c6a51x2l9e | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:13:04.680476300Z | 15.540361300s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + | eeam9s8sny | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:14:04.803084100Z | 42.306385500s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- 
--------------- + | p4wv4hwgc4 | recipes.core.basic.lp.go_greet | WORKFLOW | ABORTED | 2021-02-17T08:14:27.476307400Z | 19.727504400s | + ------------ ------------------------------------------------------------------------- ---------- ----------- -------------------------------- --------------- + +Usage + + +:: + + flytectl delete execution [flags] + +Options +~~~~~~~ + +:: + + --dryRun execute command without making any modifications. + -h, --help help for execution + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. 
(default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. 
+ --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. 
(default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. + diff --git a/flytectl/docs/source/gen/flytectl_delete_plugin-override.rst b/flytectl/docs/source/gen/flytectl_delete_plugin-override.rst new file mode 100644 index 0000000000..58e26d4457 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_plugin-override.rst @@ -0,0 +1,136 @@ +.. _flytectl_delete_plugin-override: + +flytectl delete plugin-override +------------------------------- + +Deletes matchable resources of plugin overrides. + +Synopsis +~~~~~~~~ + + + +Delete plugin override for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete plugin-override -p flytesnacks -d development + + +To delete plugin override using the config file which was used to create it, run: +:: + + flytectl delete plugin-override --attrFile po.yaml + +For example, here's the config file po.yaml: + +.. 
code-block:: yaml + + domain: development + project: flytesnacks + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT + +Overrides are optional in the file as they are unread during the delete command but can be retained since the same file can be used for get, update and delete commands. + +To delete plugin override for the workflow 'core.control_flow.merge_sort.merge_sort', run the following command: + +:: + + flytectl delete plugin-override -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage + + +:: + + flytectl delete plugin-override [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for delete attribute for the resource type. + --dryRun execute command without making any modifications. + -h, --help help for plugin-override + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. 
+ diff --git a/flytectl/docs/source/gen/flytectl_delete_task-resource-attribute.rst b/flytectl/docs/source/gen/flytectl_delete_task-resource-attribute.rst new file mode 100644 index 0000000000..f523a7717e --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_task-resource-attribute.rst @@ -0,0 +1,136 @@ +.. _flytectl_delete_task-resource-attribute: + +flytectl delete task-resource-attribute +--------------------------------------- + +Deletes matchable resources of task attributes. + +Synopsis +~~~~~~~~ + + + +Delete task resource attributes for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete task-resource-attribute -p flytesnacks -d development + +To delete task resource attribute using the config file which was used to create it, run: + +:: + + flytectl delete task-resource-attribute --attrFile tra.yaml + +For example, here's the config file tra.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +The defaults/limits are optional in the file as they are unread during the delete command, but can be retained since the same file can be used for 'get', 'update' and 'delete' commands. + +To delete task resource attribute for the workflow 'core.control_flow.merge_sort.merge_sort', run the following command: + +:: + + flytectl delete task-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage + + +:: + + flytectl delete task-resource-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for delete attribute for the resource type. + --dryRun execute command without making any modifications. 
+ -h, --help help for task-resource-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. + diff --git a/flytectl/docs/source/gen/flytectl_delete_workflow-execution-config.rst b/flytectl/docs/source/gen/flytectl_delete_workflow-execution-config.rst new file mode 100644 index 0000000000..389dad93be --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_delete_workflow-execution-config.rst @@ -0,0 +1,134 @@ +.. _flytectl_delete_workflow-execution-config: + +flytectl delete workflow-execution-config +----------------------------------------- + +Deletes matchable resources of workflow execution config. + +Synopsis +~~~~~~~~ + + + +Delete workflow execution config for the given project and domain combination or additionally the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl delete workflow-execution-config -p flytesnacks -d development + +To delete workflow execution config using the config file which was used to create it, run: + +:: + + flytectl delete workflow-execution-config --attrFile wec.yaml + +For example, here's the config file wec.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + max_parallelism: 5 + security_context: + run_as: + k8s_service_account: demo + +Max_parallelism is optional in the file as it is unread during the delete command but can be retained since the same file can be used for get, update and delete commands. 
+ +To delete workflow execution config for the workflow 'core.control_flow.merge_sort.merge_sort', run: + +:: + + flytectl delete workflow-execution-config -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Usage + + +:: + + flytectl delete workflow-execution-config [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for delete attribute for the resource type. + --dryRun execute command without making any modifications. + -h, --help help for workflow-execution-config + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_delete` - Terminates/deletes various Flyte resources such as executions and resource attributes. + diff --git a/flytectl/docs/source/gen/flytectl_demo.rst b/flytectl/docs/source/gen/flytectl_demo.rst new file mode 100644 index 0000000000..2176c7b95d --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_demo.rst @@ -0,0 +1,122 @@ +.. _flytectl_demo: + +flytectl demo +------------- + +Helps with demo interactions like start, teardown, status, and exec. + +Synopsis +~~~~~~~~ + + + +Flyte Demo is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte demo as a single Docker container locally. 
+ +To create a demo cluster, run: +:: + + flytectl demo start + +To remove a demo cluster, run: +:: + + flytectl demo teardown + +To check the status of the demo container, run: +:: + + flytectl demo status + +To execute commands inside the demo container, use exec: +:: + + flytectl demo exec -- pwd + + +Options +~~~~~~~ + +:: + + -h, --help help for demo + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. 
+ --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. 
+ --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' 
+ --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_demo_exec` - Executes non-interactive command inside the demo container +* :doc:`flytectl_demo_reload` - Power cycle the Flyte executable pod, effectively picking up an updated config. +* :doc:`flytectl_demo_start` - Starts the Flyte demo cluster. +* :doc:`flytectl_demo_status` - Gets the status of the demo environment. +* :doc:`flytectl_demo_teardown` - Cleans up the demo environment + diff --git a/flytectl/docs/source/gen/flytectl_demo_exec.rst b/flytectl/docs/source/gen/flytectl_demo_exec.rst new file mode 100644 index 0000000000..8a0c9c4861 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_demo_exec.rst @@ -0,0 +1,106 @@ +.. _flytectl_demo_exec: + +flytectl demo exec +------------------ + +Executes non-interactive command inside the demo container + +Synopsis +~~~~~~~~ + + + +Run non-interactive commands inside the demo container and immediately return the output. +By default, "flytectl exec" is present in the /root directory inside the demo container. 
+ +:: + + flytectl demo exec -- ls -al + +Usage + +:: + + flytectl demo exec [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for exec + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution: shouldn't be used for production use cases
+      --admin.maxBackoffDelay string              Max delay for grpc backoff (default "8s")
+      --admin.maxRetries int                      Max number of gRPC retries (default 4)
+      --admin.perRetryTimeout string              gRPC per retry timeout (default "15s")
+      --admin.pkceConfig.refreshTime string       grace period from the token expiry after which it would refresh the token. (default "5m0s")
+      --admin.pkceConfig.timeout string           Amount of time the browser session would be active for authentication from client app. (default "2m0s")
+      --admin.scopes strings                      List of scopes to request
+      --admin.tokenRefreshWindow string           Max duration between token refresh attempt and token expiry. (default "0s")
+      --admin.tokenUrl string                     OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided.
+      --admin.useAudienceFromAdmin                Use Audience configured from admins public endpoint config.
+      --admin.useAuth                             Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information.
+  -c, --config string                             config file (default is $HOME/.flyte/config.yaml)
+      --console.endpoint string                   Endpoint of console, if different than flyte admin
+  -d, --domain string                             Specifies the Flyte project's domain.
+      --files.archive                             Pass in archive file either an http link or local path.
+      --files.assumableIamRole string             Custom assumable iam auth role to register launch plans with.
+      --files.continueOnError                     Continue on error when registering files.
+      --files.destinationDirectory string         Location of source code in container.
+      --files.dryRun                              Execute command without making any modifications.
+      --files.enableSchedule                      Enable the schedule if the files contain schedulable launchplan.
+      --files.force                               Force use of version number on entities registered with flyte.
+      --files.k8ServiceAccount string             Deprecated. Please use --K8sServiceAccount
+      --files.k8sServiceAccount string            Custom kubernetes service account auth role to register launch plans with.
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_demo` - Helps with demo interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_demo_reload.rst b/flytectl/docs/source/gen/flytectl_demo_reload.rst new file mode 100644 index 0000000000..9fccacec1c --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_demo_reload.rst @@ -0,0 +1,119 @@ +.. _flytectl_demo_reload: + +flytectl demo reload +-------------------- + +Power cycle the Flyte executable pod, effectively picking up an updated config. + +Synopsis +~~~~~~~~ + + + +If you've changed the ~/.flyte/state/flyte.yaml file, run this command to restart the Flyte binary pod, effectively +picking up the new settings: + +Usage +:: + + flytectl demo reload + + + +:: + + flytectl demo reload [flags] + +Options +~~~~~~~ + +:: + + --dev Optional. Only start minio and postgres in the sandbox. + --disable-agent Optional. Disable the agent service. + --dryRun Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use' + --env strings Optional. Provide Env variable in key=value format which can be passed to sandbox container. + --force Optional. Forcefully delete existing sandbox cluster if it exists. + -h, --help help for reload + --image string Optional. Provide a fully qualified path to a Flyte compliant docker image. 
+      --imagePullOptions.platform string        Forces a specific platform's image to be pulled.
+      --imagePullOptions.registryAuth string    The base64 encoded credentials for the registry.
+      --imagePullPolicy ImagePullPolicy         Optional. Defines the image pull behavior [Always/IfNotPresent/Never] (default Always)
+      --pre                                     Optional. Pre release Version of flyte will be used for sandbox.
+      --source string                           deprecated, path of your source code, please build images with local daemon
+      --version string                          Version of flyte. Only supports flyte releases greater than v0.10.0
+
+Options inherited from parent commands
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+      --admin.audience string                     Audience to use when initiating OAuth2 authorization requests.
+      --admin.authType string                     Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret")
+      --admin.authorizationHeader string          Custom metadata header to pass JWT
+      --admin.authorizationServerUrl string       This is the URL to your IdP's authorization server. It'll default to Endpoint
+      --admin.caCertFilePath string               Use specified certificate file to verify the admin server peer.
+      --admin.clientId string                     Client ID (default "flytepropeller")
+      --admin.clientSecretEnvVar string           Environment variable containing the client secret
+      --admin.clientSecretLocation string         File containing the client secret (default "/etc/secrets/client_secret")
+      --admin.command strings                     Command for external authentication token generation
+      --admin.defaultServiceConfig string         
+      --admin.deviceFlowConfig.pollInterval string   amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval (default "5s")
+      --admin.deviceFlowConfig.refreshTime string    grace period from the token expiry after which it would refresh the token. (default "5m0s")
+      --admin.deviceFlowConfig.timeout string        amount of time the device flow should complete or else it will be cancelled. (default "10m0s")
+      --admin.endpoint string                     For admin types, specify where the uri of the service is located.
+      --admin.httpProxyURL string                 OPTIONAL: HTTP Proxy to be used for OAuth requests.
+      --admin.insecure                            Use insecure connection.
+      --admin.insecureSkipVerify                  InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution: shouldn't be used for production use cases
+      --admin.maxBackoffDelay string              Max delay for grpc backoff (default "8s")
+      --admin.maxRetries int                      Max number of gRPC retries (default 4)
+      --admin.perRetryTimeout string              gRPC per retry timeout (default "15s")
+      --admin.pkceConfig.refreshTime string       grace period from the token expiry after which it would refresh the token. (default "5m0s")
+      --admin.pkceConfig.timeout string           Amount of time the browser session would be active for authentication from client app. (default "2m0s")
+      --admin.scopes strings                      List of scopes to request
+      --admin.tokenRefreshWindow string           Max duration between token refresh attempt and token expiry. (default "0s")
+      --admin.tokenUrl string                     OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided.
+      --admin.useAudienceFromAdmin                Use Audience configured from admins public endpoint config.
+      --admin.useAuth                             Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information.
+  -c, --config string                             config file (default is $HOME/.flyte/config.yaml)
+      --console.endpoint string                   Endpoint of console, if different than flyte admin
+  -d, --domain string                             Specifies the Flyte project's domain.
+      --files.archive                             Pass in archive file either an http link or local path.
+      --files.assumableIamRole string             Custom assumable iam auth role to register launch plans with.
+      --files.continueOnError                     Continue on error when registering files.
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_demo` - Helps with demo interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_demo_start.rst b/flytectl/docs/source/gen/flytectl_demo_start.rst new file mode 100644 index 0000000000..89ed10f585 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_demo_start.rst @@ -0,0 +1,180 @@ +.. _flytectl_demo_start: + +flytectl demo start +------------------- + +Starts the Flyte demo cluster. + +Synopsis +~~~~~~~~ + + + +Flyte demo is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte demo as a single Docker container locally. + +Starts the demo cluster without any source code: +:: + + flytectl demo start + +Runs a dev cluster, which only has minio and postgres pod. +:: + + flytectl demo start --dev + +Mounts your source code repository inside the demo cluster: +:: + + flytectl demo start --source=$HOME/flyteorg/flytesnacks + +Specify a Flyte demo compliant image with the registry. 
This is useful in case you want to use an image from your registry. +:: + + flytectl demo start --image docker.io/my-override:latest + +Note: If image flag is passed then Flytectl will ignore version and pre flags. + +Specify a Flyte demo image pull policy. Possible pull policy values are Always, IfNotPresent, or Never: +:: + + flytectl demo start --image docker.io/my-override:latest --imagePullPolicy Always + +Runs a specific version of Flyte. Flytectl demo only supports Flyte version available in the Github release, https://github.com/flyteorg/flyte/tags. +:: + + flytectl demo start --version=v0.14.0 + +.. note:: + Flytectl demo is only supported for Flyte versions >= v1.0.0 + +Runs the latest pre release of Flyte. +:: + + flytectl demo start --pre + +Start demo cluster passing environment variables. This can be used to pass docker specific env variables or flyte specific env variables. +eg : for passing timeout value in secs for the demo container use the following. +:: + + flytectl demo start --env FLYTE_TIMEOUT=700 + +The DURATION can be a positive integer or a floating-point number, followed by an optional unit suffix:: +s - seconds (default) +m - minutes +h - hours +d - days +When no unit is used, it defaults to seconds. If the duration is set to zero, the associated timeout is disabled. + + +eg : for passing multiple environment variables +:: + + flytectl demo start --env USER=foo --env PASSWORD=bar + + +For just printing the docker commands for bringing up the demo container +:: + + flytectl demo start --dryRun + +Usage + + +:: + + flytectl demo start [flags] + +Options +~~~~~~~ + +:: + + --dev Optional. Only start minio and postgres in the sandbox. + --disable-agent Optional. Disable the agent service. + --dryRun Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use' + --env strings Optional. 
Provide Env variable in key=value format which can be passed to sandbox container. + --force Optional. Forcefully delete existing sandbox cluster if it exists. + -h, --help help for start + --image string Optional. Provide a fully qualified path to a Flyte compliant docker image. + --imagePullOptions.platform string Forces a specific platform's image to be pulled.' + --imagePullOptions.registryAuth string The base64 encoded credentials for the registry. + --imagePullPolicy ImagePullPolicy Optional. Defines the image pull behavior [Always/IfNotPresent/Never] (default Always) + --pre Optional. Pre release Version of flyte will be used for sandbox. + --source string deprecated, path of your source code, please build images with local daemon + --version string Version of flyte. Only supports flyte releases greater than v0.10.0 + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. 
Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. 
+ --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. 
+ --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_demo` - Helps with demo interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_demo_status.rst b/flytectl/docs/source/gen/flytectl_demo_status.rst new file mode 100644 index 0000000000..3d21334326 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_demo_status.rst @@ -0,0 +1,106 @@ +.. _flytectl_demo_status: + +flytectl demo status +-------------------- + +Gets the status of the demo environment. + +Synopsis +~~~~~~~~ + + + +Retrieves the status of the demo environment. Currently, Flyte demo runs as a local Docker container. 
+ +Usage +:: + + flytectl demo status + + + +:: + + flytectl demo status [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for status + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_demo` - Helps with demo interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_demo_teardown.rst b/flytectl/docs/source/gen/flytectl_demo_teardown.rst new file mode 100644 index 0000000000..42d9c22630 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_demo_teardown.rst @@ -0,0 +1,107 @@ +.. _flytectl_demo_teardown: + +flytectl demo teardown +---------------------- + +Cleans up the demo environment + +Synopsis +~~~~~~~~ + + + +Removes the demo cluster and all the Flyte config created by 'demo start': +:: + + flytectl demo teardown + + +Usage + + +:: + + flytectl demo teardown [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for teardown + -v, --volume Optional. Clean up Docker volume. This will result in a permanent loss of all data within the database and object store. Use with caution! + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. 
It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. 
(default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. 
(default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. 
(default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_demo` - Helps with demo interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_get.rst b/flytectl/docs/source/gen/flytectl_get.rst new file mode 100644 index 0000000000..f48ddf3ef8 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get.rst @@ -0,0 +1,110 @@ +.. _flytectl_get: + +flytectl get +------------ + +Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + +Synopsis +~~~~~~~~ + + + +To fetch a project, use the following command: +:: + + flytectl get project + + +Options +~~~~~~~ + +:: + + -h, --help help for get + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. 
Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. 
+ --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. 
+ --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_get_cluster-resource-attribute` - Gets matchable resources of cluster resource attributes. +* :doc:`flytectl_get_execution` - Gets execution resources. +* :doc:`flytectl_get_execution-cluster-label` - Gets matchable resources of execution cluster label. +* :doc:`flytectl_get_execution-queue-attribute` - Gets matchable resources of execution queue attributes. +* :doc:`flytectl_get_launchplan` - Gets the launch plan resources. +* :doc:`flytectl_get_plugin-override` - Gets matchable resources of plugin override. 
+* :doc:`flytectl_get_project` - Gets project resources +* :doc:`flytectl_get_task` - Gets task resources +* :doc:`flytectl_get_task-resource-attribute` - Gets matchable resources of task attributes. +* :doc:`flytectl_get_workflow` - Gets workflow resources +* :doc:`flytectl_get_workflow-execution-config` - Gets matchable resources of workflow execution config. + diff --git a/flytectl/docs/source/gen/flytectl_get_cluster-resource-attribute.rst b/flytectl/docs/source/gen/flytectl_get_cluster-resource-attribute.rst new file mode 100644 index 0000000000..b242491a88 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_cluster-resource-attribute.rst @@ -0,0 +1,142 @@ +.. _flytectl_get_cluster-resource-attribute: + +flytectl get cluster-resource-attribute +--------------------------------------- + +Gets matchable resources of cluster resource attributes. + +Synopsis +~~~~~~~~ + + + +Retrieve cluster resource attributes for the given project and domain. +For project flytesnacks and development domain: +:: + + flytectl get cluster-resource-attribute -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","attributes":{"buzz":"lightyear","foo":"bar"}} + +Retrieve cluster resource attributes for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get cluster-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","attributes":{"buzz":"lightyear","foo":"bar"}} + +Write the cluster resource attributes to a file. If there are no cluster resource attributes, the command throws an error. +The config file is written to cra.yaml file. 
+Example: content of cra.yaml: + +:: + + flytectl get cluster-resource-attribute --attrFile cra.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + attributes: + foo: "bar" + buzz: "lightyear" + +Usage + + +:: + + flytectl get cluster-resource-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for generating attribute for the resource type. + -h, --help help for cluster-resource-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. 
(default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. 
+ --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. 
(default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_execution-cluster-label.rst b/flytectl/docs/source/gen/flytectl_get_execution-cluster-label.rst new file mode 100644 index 0000000000..9147ff0736 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_execution-cluster-label.rst @@ -0,0 +1,141 @@ +.. _flytectl_get_execution-cluster-label: + +flytectl get execution-cluster-label +------------------------------------ + +Gets matchable resources of execution cluster label. + +Synopsis +~~~~~~~~ + + + +Retrieve the execution cluster label for a given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain, run: +:: + + flytectl get execution-cluster-label -p flytesnacks -d development + +The output would look like: + +.. 
code-block:: json + + {"project":"flytesnacks","domain":"development","value":"foo"} + +Retrieve the execution cluster label for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get execution-cluster-label -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","value":"foo"} + +Write the execution cluster label to a file. If there is no execution cluster label, the command throws an error. +The config file is written to ecl.yaml file. +Example: content of ecl.yaml: + +:: + + flytectl get execution-cluster-label --attrFile ecl.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + value: foo + +Usage + + +:: + + flytectl get execution-cluster-label [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for generating attribute for the resource type. + -h, --help help for execution-cluster-label + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. 
+ diff --git a/flytectl/docs/source/gen/flytectl_get_execution-queue-attribute.rst b/flytectl/docs/source/gen/flytectl_get_execution-queue-attribute.rst new file mode 100644 index 0000000000..71929da6ae --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_execution-queue-attribute.rst @@ -0,0 +1,144 @@ +.. _flytectl_get_execution-queue-attribute: + +flytectl get execution-queue-attribute +-------------------------------------- + +Gets matchable resources of execution queue attributes. + +Synopsis +~~~~~~~~ + + + +Retrieve the execution queue attribute for the given project and domain. +For project flytesnacks and development domain: +:: + + flytectl get execution-queue-attribute -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","tags":["foo", "bar"]} + +Retrieve the execution queue attribute for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get execution-queue-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","tags":["foo", "bar"]} + +Write the execution queue attribute to a file. If there are no execution queue attributes, the command throws an error. +The config file is written to era.yaml file. +Example: content of era.yaml: + +:: + + flytectl get execution-queue-attribute --attrFile era.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + tags: + - foo + - bar + - buzz + - lightyear + +Usage + + +:: + + flytectl get execution-queue-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for generating attribute for the resource type. 
+ -h, --help help for execution-queue-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_execution.rst b/flytectl/docs/source/gen/flytectl_get_execution.rst new file mode 100644 index 0000000000..56ed2a6a70 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_execution.rst @@ -0,0 +1,174 @@ +.. _flytectl_get_execution: + +flytectl get execution +---------------------- + +Gets execution resources. + +Synopsis +~~~~~~~~ + + + +Retrieve all executions within the project and domain. +:: + + flytectl get execution -p flytesnacks -d development + +.. note:: + The terms execution/executions are interchangeable in these commands. + +Retrieve executions by name within the project and domain. +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r + +Retrieve all the executions with filters. +:: + + flytectl get execution -p flytesnacks -d development --filter.fieldSelector="execution.phase in (FAILED;SUCCEEDED),execution.duration<200" + + +Retrieve executions as per the specified limit and sorting parameters. +:: + + flytectl get execution -p flytesnacks -d development --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve executions present in other pages by specifying the limit and page number. 
+ +:: + + flytectl get -p flytesnacks -d development execution --filter.limit=10 --filter.page=2 + +Retrieve executions within the project and domain in YAML format. + +:: + + flytectl get execution -p flytesnacks -d development -o yaml + +Retrieve executions within the project and domain in JSON format. + +:: + + flytectl get execution -p flytesnacks -d development -o json + + +Get more details of the execution using the --details flag, which shows node and task executions. +The default view is a tree view, and the TABLE view format is not supported on this view. + +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --details + +Fetch execution details in YAML format. In this view, only node details are available. For task, pass the --nodeID flag. +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --details -o yaml + +Fetch task executions on a specific node using the --nodeID flag. Use the nodeID attribute given by the node details view. + +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --nodeID n0 + +Task execution view is available in YAML/JSON format too. The following example showcases YAML, where the output contains input and output data of each node. + +:: + + flytectl get execution -p flytesnacks -d development oeh94k9r2r --nodeID n0 -o yaml + +Usage + + +:: + + flytectl get execution [flags] + +Options +~~~~~~~ + +:: + + --details gets node execution details. Only applicable for single execution name i.e get execution name --details + --filter.asc Specifies the sorting order. 
By default flytectl sort result in descending order + --filter.fieldSelector string Specifies the Field selector + --filter.limit int32 Specifies the limit (default 100) + --filter.page int32 Specifies the page number, in case there are multiple pages of results (default 1) + --filter.sortBy string Specifies which field to sort results (default "created_at") + -h, --help help for execution + --nodeID string get task executions for given node name. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. 
(default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. 
+ --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. 
(default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_launchplan.rst b/flytectl/docs/source/gen/flytectl_get_launchplan.rst new file mode 100644 index 0000000000..e340b86636 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_launchplan.rst @@ -0,0 +1,209 @@ +.. _flytectl_get_launchplan: + +flytectl get launchplan +----------------------- + +Gets the launch plan resources. + +Synopsis +~~~~~~~~ + + + +Retrieve all launch plans within the project and domain: +:: + + flytectl get launchplan -p flytesnacks -d development + +.. note:: + + The terms launchplan/launchplans are interchangeable in these commands. 
+ +Retrieve a launch plan by name within the project and domain: + +:: + + flytectl get launchplan -p flytesnacks -d development core.basic.lp.go_greet + + +Retrieve the latest version of the task by name within the project and domain: + +:: + + flytectl get launchplan -p flytesnacks -d development core.basic.lp.go_greet --latest + +Retrieve a particular version of the launch plan by name within the project and domain: + +:: + + flytectl get launchplan -p flytesnacks -d development core.basic.lp.go_greet --version v2 + +Retrieve all launch plans for a given workflow name: + +:: + + flytectl get launchplan -p flytesnacks -d development --workflow core.flyte_basics.lp.go_greet + +Retrieve all the launch plans with filters: +:: + + flytectl get launchplan -p flytesnacks -d development --filter.fieldSelector="name=core.basic.lp.go_greet" + +Retrieve all active launch plans: +:: + + flytectl get launchplan -p flytesnacks -d development -o yaml --filter.fieldSelector "state=1" + +Retrieve all archived launch plans: +:: + + flytectl get launchplan -p flytesnacks -d development -o yaml --filter.fieldSelector "state=0" + +Retrieve launch plans entity search across all versions with filters: +:: + + flytectl get launchplan -p flytesnacks -d development k8s_spark.dataframe_passing.my_smart_schema --filter.fieldSelector="version=v1" + + +Retrieve all the launch plans with limit and sorting: +:: + + flytectl get launchplan -p flytesnacks -d development --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve launch plans present in other pages by specifying the limit and page number: +:: + + flytectl get -p flytesnacks -d development launchplan --filter.limit=10 --filter.page=2 + +Retrieve all launch plans within the project and domain in YAML format: + +:: + + flytectl get launchplan -p flytesnacks -d development -o yaml + +Retrieve all launch plans the within the project and domain in JSON format: + +:: + + flytectl get launchplan -p flytesnacks -d development -o 
json + +Retrieve a launch plan within the project and domain as per a version and generates the execution spec file; the file can be used to launch the execution using the 'create execution' command: + +:: + + flytectl get launchplan -d development -p flytesnacks core.control_flow.merge_sort.merge_sort --execFile execution_spec.yaml + +The generated file would look similar to this: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + numbers: + - 0 + numbers_count: 0 + run_local_at_count: 10 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + version: v3 + workflow: core.control_flow.merge_sort.merge_sort + +Check the :ref:`create execution section` on how to launch one using the generated file. +Usage + + +:: + + flytectl get launchplan [flags] + +Options +~~~~~~~ + +:: + + --execFile string execution file name to be used for generating execution spec of a single launchplan. + --filter.asc Specifies the sorting order. By default flytectl sort result in descending order + --filter.fieldSelector string Specifies the Field selector + --filter.limit int32 Specifies the limit (default 100) + --filter.page int32 Specifies the page number, in case there are multiple pages of results (default 1) + --filter.sortBy string Specifies which field to sort results (default "created_at") + -h, --help help for launchplan + --latest flag to indicate to fetch the latest version, version flag will be ignored in this case + --version string version of the launchplan to be fetched. + --workflow string name of the workflow for which the launchplans need to be fetched. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_plugin-override.rst b/flytectl/docs/source/gen/flytectl_get_plugin-override.rst new file mode 100644 index 0000000000..bf9437513c --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_plugin-override.rst @@ -0,0 +1,163 @@ +.. _flytectl_get_plugin-override: + +flytectl get plugin-override +---------------------------- + +Gets matchable resources of plugin override. + +Synopsis +~~~~~~~~ + + + +Retrieve the plugin override for the given project and domain. +For project flytesnacks and development domain: + +:: + + flytectl get plugin-override -p flytesnacks -d development + +Example: output from the command + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "overrides": [{ + "task_type": "python_task", + "plugin_id": ["pluginoverride1", "pluginoverride2"], + "missing_plugin_behavior": 0 + }] + } + +Retrieve the plugin override for the given project, domain, and workflow. +For project flytesnacks, development domain and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get plugin-override -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. 
code-block:: json

    {
        "project": "flytesnacks",
        "domain": "development",
        "workflow": "core.control_flow.merge_sort.merge_sort",
        "overrides": [{
            "task_type": "python_task",
            "plugin_id": ["pluginoverride1", "pluginoverride2"],
            "missing_plugin_behavior": 0
        }]
    }

Write plugin overrides to a file. If there are no plugin overrides, the command throws an error.
The config file is written to po.yaml file.
Example: content of po.yaml:

::

 flytectl get plugin-override --attrFile po.yaml


.. code-block:: yaml

    domain: development
    project: flytesnacks
    overrides:
       - task_type: python_task # Task type for which to apply plugin implementation overrides
         plugin_id: # Plugin id(s) to be used in place of the default for the task type.
           - plugin_override1
           - plugin_override2
         missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT

Usage


::

  flytectl get plugin-override [flags]

Options
~~~~~~~

::

      --attrFile string   attribute file name to be used for generating attribute for the resource type.
  -h, --help              help for plugin-override

Options inherited from parent commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

::

      --admin.audience string                        Audience to use when initiating OAuth2 authorization requests.
      --admin.authType string                        Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret")
      --admin.authorizationHeader string             Custom metadata header to pass JWT
      --admin.authorizationServerUrl string          This is the URL to your IdP's authorization server. It'll default to Endpoint
      --admin.caCertFilePath string                  Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. 
+ diff --git a/flytectl/docs/source/gen/flytectl_get_project.rst b/flytectl/docs/source/gen/flytectl_get_project.rst new file mode 100644 index 0000000000..07d25570a2 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_project.rst @@ -0,0 +1,146 @@ +.. _flytectl_get_project: + +flytectl get project +-------------------- + +Gets project resources + +Synopsis +~~~~~~~~ + + + +Retrieve all the projects: +:: + + flytectl get project + +.. note:: + The terms project/projects are interchangeable in these commands. + +Retrieve project by name: + +:: + + flytectl get project flytesnacks + +Retrieve all the projects with filters: +:: + + flytectl get project --filter.fieldSelector="project.name=flytesnacks" + +Retrieve all the projects with limit and sorting: +:: + + flytectl get project --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve projects present in other pages by specifying the limit and page number: +:: + + flytectl get project --filter.limit=10 --filter.page=2 + +Retrieve all the projects in yaml format: + +:: + + flytectl get project -o yaml + +Retrieve all the projects in json format: + +:: + + flytectl get project -o json + +Usage + + +:: + + flytectl get project [flags] + +Options +~~~~~~~ + +:: + + --filter.asc Specifies the sorting order. By default flytectl sort result in descending order + --filter.fieldSelector string Specifies the Field selector + --filter.limit int32 Specifies the limit (default 100) + --filter.page int32 Specifies the page number, in case there are multiple pages of results (default 1) + --filter.sortBy string Specifies which field to sort results (default "created_at") + -h, --help help for project + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_task-resource-attribute.rst b/flytectl/docs/source/gen/flytectl_get_task-resource-attribute.rst new file mode 100644 index 0000000000..2b2f369afe --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_task-resource-attribute.rst @@ -0,0 +1,146 @@ +.. _flytectl_get_task-resource-attribute: + +flytectl get task-resource-attribute +------------------------------------ + +Gets matchable resources of task attributes. + +Synopsis +~~~~~~~~ + + + +Retrieve task resource attributes for the given project and domain. +For project flytesnacks and development domain: +:: + + flytectl get task-resource-attribute -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"450Mi"}} + +Retrieve task resource attributes for the given project, domain, and workflow. +For project flytesnacks, development domain, and workflow 'core.control_flow.merge_sort.merge_sort': +:: + + flytectl get task-resource-attribute -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. 
code-block:: json + + {"project":"flytesnacks","domain":"development","workflow":"core.control_flow.merge_sort.merge_sort","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"450Mi"}} + + +Write the task resource attributes to a file. If there are no task resource attributes, a file would be populated with the basic data. +The config file is written to tra.yaml file. +Example: content of tra.yaml: + +:: + + flytectl get task-resource-attribute --attrFile tra.yaml + + +.. code-block:: yaml + + domain: development + project: flytesnacks + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +Usage + + +:: + + flytectl get task-resource-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for generating attribute for the resource type. + -h, --help help for task-resource-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. 
+
diff --git a/flytectl/docs/source/gen/flytectl_get_task.rst b/flytectl/docs/source/gen/flytectl_get_task.rst
new file mode 100644
index 0000000000..383645221e
--- /dev/null
+++ b/flytectl/docs/source/gen/flytectl_get_task.rst
@@ -0,0 +1,190 @@
+.. _flytectl_get_task:

+flytectl get task
+-----------------

+Gets task resources

+Synopsis
+~~~~~~~~




+Retrieve all the tasks within project and domain:
+::

+ flytectl get task -p flytesnacks -d development

+.. note::
+   The terms task/tasks are interchangeable in these commands.

+Retrieve task by name within project and domain:

+::

+ flytectl get task -p flytesnacks -d development core.basic.lp.greet

+Retrieve latest version of task by name within project and domain:

+::

+ flytectl get task -p flytesnacks -d development core.basic.lp.greet --latest

+Retrieve particular version of task by name within project and domain:

+::

+ flytectl get task -p flytesnacks -d development core.basic.lp.greet --version v2

+Retrieve all the tasks with filters:
+::

+  flytectl get task -p flytesnacks -d development --filter.fieldSelector="task.name=k8s_spark.pyspark_pi.print_every_time,task.version=v1"

+Retrieve a specific task with filters:
+::

+  flytectl get task -p flytesnacks -d development k8s_spark.pyspark_pi.print_every_time --filter.fieldSelector="task.version=v1,created_at>=2021-05-24T21:43:12.325335Z"

+Retrieve all the tasks with limit and sorting:
+::

+  flytectl get -p flytesnacks -d development task --filter.sortBy=created_at --filter.limit=1 --filter.asc

+Retrieve tasks present in other pages by specifying the limit and page number:
+::

+  flytectl get -p flytesnacks -d development task --filter.limit=10 --filter.page=2

+Retrieve all the tasks within project and domain in yaml format:
+::

+  flytectl get task -p flytesnacks -d development -o yaml

+Retrieve all the tasks within project and domain in json format:

+::

+  flytectl get task -p flytesnacks -d development -o json
+ 
+Retrieve tasks within project and domain for a version and generate the execution spec file for it to be used for launching the execution using create execution: + +:: + + flytectl get tasks -d development -p flytesnacks core.control_flow.merge_sort.merge --execFile execution_spec.yaml --version v2 + +The generated file would look similar to this: + +.. code-block:: yaml + + iamRoleARN: "" + inputs: + sorted_list1: + - 0 + sorted_list2: + - 0 + kubeServiceAcct: "" + targetDomain: "" + targetProject: "" + task: core.control_flow.merge_sort.merge + version: v2 + +Check the create execution section on how to launch one using the generated file. + +Usage + + +:: + + flytectl get task [flags] + +Options +~~~~~~~ + +:: + + --execFile string execution file name to be used for generating execution spec of a single task. + --filter.asc Specifies the sorting order. By default flytectl sort result in descending order + --filter.fieldSelector string Specifies the Field selector + --filter.limit int32 Specifies the limit (default 100) + --filter.page int32 Specifies the page number, in case there are multiple pages of results (default 1) + --filter.sortBy string Specifies which field to sort results (default "created_at") + -h, --help help for task + --latest flag to indicate to fetch the latest version, version flag will be ignored in this case + --version string version of the task to be fetched. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. 
It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. 
(default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. 
(default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. 
(default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_workflow-execution-config.rst b/flytectl/docs/source/gen/flytectl_get_workflow-execution-config.rst new file mode 100644 index 0000000000..8c332c3ada --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_workflow-execution-config.rst @@ -0,0 +1,203 @@ +.. _flytectl_get_workflow-execution-config: + +flytectl get workflow-execution-config +-------------------------------------- + +Gets matchable resources of workflow execution config. + +Synopsis +~~~~~~~~ + + + +Retrieve workflow execution config for the given project and domain, in combination with the workflow name. + +For project flytesnacks and development domain: + +:: + + flytectl get workflow-execution-config -p flytesnacks -d development + +Example: output from the command: + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "max_parallelism": 5 + } + +Retrieve workflow execution config for the project, domain, and workflow. +For project flytesnacks, development domain and workflow 'core.control_flow.merge_sort.merge_sort': + +:: + + flytectl get workflow-execution-config -p flytesnacks -d development core.control_flow.merge_sort.merge_sort + +Example: output from the command: + +.. code-block:: json + + { + "project": "flytesnacks", + "domain": "development", + "workflow": "core.control_flow.merge_sort.merge_sort", + "max_parallelism": 5 + } + +Write the workflow execution config to a file. If there is no workflow execution config, the command throws an error. +The config file is written to wec.yaml file. +Example: content of wec.yaml: + +:: + + flytectl get workflow-execution-config -p flytesnacks -d development --attrFile wec.yaml + + +.. 
code-block:: yaml + + domain: development + project: flytesnacks + max_parallelism: 5 + +Generate a sample workflow execution config file to be used for creating a new workflow execution config at project domain + +:: + flytectl get workflow-execution-config -p flytesnacks -d development --attrFile wec.yaml --gen + + +.. code-block:: yaml + + annotations: + values: + cliAnnotationKey: cliAnnotationValue + domain: development + labels: + values: + cliLabelKey: cliLabelValue + max_parallelism: 10 + project: flytesnacks + raw_output_data_config: + output_location_prefix: cliOutputLocationPrefix + security_context: + run_as: + k8s_service_account: default + + + +Generate a sample workflow execution config file to be used for creating a new workflow execution config at project domain workflow level + +:: + flytectl get workflow-execution-config -p flytesnacks -d development --attrFile wec.yaml --gen + + +.. code-block:: yaml + + annotations: + values: + cliAnnotationKey: cliAnnotationValue + domain: development + labels: + values: + cliLabelKey: cliLabelValue + max_parallelism: 10 + project: flytesnacks + workflow: k8s_spark.dataframe_passing.my_smart_structured_dataset + raw_output_data_config: + output_location_prefix: cliOutputLocationPrefix + security_context: + run_as: + k8s_service_account: default + + +Usage + + +:: + + flytectl get workflow-execution-config [flags] + +Options +~~~~~~~ + +:: + + --attrFile string   attribute file name to be used for generating attribute for the resource type. + --gen               generates an empty workflow execution config file with conformance to the api format. + -h, --help              help for workflow-execution-config + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string                        Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_get_workflow.rst b/flytectl/docs/source/gen/flytectl_get_workflow.rst new file mode 100644 index 0000000000..f446fdeb9f --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_get_workflow.rst @@ -0,0 +1,174 @@ +.. _flytectl_get_workflow: + +flytectl get workflow +--------------------- + +Gets workflow resources + +Synopsis +~~~~~~~~ + + + +Retrieve all the workflows within project and domain (workflow/workflows can be used interchangeably in these commands): +:: + + flytectl get workflow -p flytesnacks -d development + +Retrieve all versions of a workflow by name within project and domain: + +:: + + flytectl get workflow -p flytesnacks -d development core.basic.lp.go_greet + +Retrieve latest version of workflow by name within project and domain: + +:: + + flytectl get workflow -p flytesnacks -d development core.basic.lp.go_greet --latest + +Retrieve particular version of workflow by name within project and domain: + +:: + + flytectl get workflow -p flytesnacks -d development core.basic.lp.go_greet --version v2 + +Retrieve all the workflows with filters: +:: + + flytectl get workflow -p flytesnacks -d development --filter.fieldSelector="workflow.name=k8s_spark.dataframe_passing.my_smart_schema" + +Retrieve specific workflow with filters: +:: + + flytectl get workflow -p 
flytesnacks -d development k8s_spark.dataframe_passing.my_smart_schema --filter.fieldSelector="workflow.version=v1" + +Retrieve all the workflows with limit and sorting: +:: + + flytectl get -p flytesnacks -d development workflow --filter.sortBy=created_at --filter.limit=1 --filter.asc + +Retrieve workflows present in other pages by specifying the limit and page number: +:: + + flytectl get -p flytesnacks -d development workflow --filter.limit=10 --filter.page 2 + +Retrieve all the workflows within project and domain in yaml format: + +:: + + flytectl get workflow -p flytesnacks -d development -o yaml + +Retrieve all the workflow within project and domain in json format: + +:: + + flytectl get workflow -p flytesnacks -d development -o json + +Visualize the graph for a workflow within project and domain in dot format: + +:: + + flytectl get workflow -p flytesnacks -d development core.flyte_basics.basic_workflow.my_wf --latest -o dot + +Visualize the graph for a workflow within project and domain in a dot content render: + +:: + + flytectl get workflow -p flytesnacks -d development core.flyte_basics.basic_workflow.my_wf --latest -o doturl + +Usage + + +:: + + flytectl get workflow [flags] + +Options +~~~~~~~ + +:: + + --filter.asc Specifies the sorting order. By default flytectl sort result in descending order + --filter.fieldSelector string Specifies the Field selector + --filter.limit int32 Specifies the limit (default 100) + --filter.page int32 Specifies the page number, in case there are multiple pages of results (default 1) + --filter.sortBy string Specifies which field to sort results + -h, --help help for workflow + --latest flag to indicate to fetch the latest version, version flag will be ignored in this case + --version string version of the workflow to be fetched. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_get` - Fetches various Flyte resources such as tasks, workflows, launch plans, executions, and projects. + diff --git a/flytectl/docs/source/gen/flytectl_register.rst b/flytectl/docs/source/gen/flytectl_register.rst new file mode 100644 index 0000000000..745dffa9b6 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_register.rst @@ -0,0 +1,101 @@ +.. _flytectl_register: + +flytectl register +----------------- + +Registers tasks, workflows, and launch plans from a list of generated serialized files. + +Synopsis +~~~~~~~~ + + + +Take input files as serialized versions of the tasks/workflows/launchplans and register them with FlyteAdmin. +Currently, these input files are protobuf files generated as output from Flytekit serialize. +Project and Domain are mandatory fields to be passed for registration and an optional version which defaults to v1. +If the entities are already registered with Flyte for the same version, the registration would fail. + + +Options +~~~~~~~ + +:: + + -h, --help help for register + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered
+      --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2)
+      --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default [])
+      --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow
+      --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3")
+
+SEE ALSO
+~~~~~~~~
+
+* :doc:`flytectl` - Flytectl CLI tool
+* :doc:`flytectl_register_examples` - Registers Flytesnacks example.
+* :doc:`flytectl_register_files` - Registers file resources.
+
diff --git a/flytectl/docs/source/gen/flytectl_register_examples.rst b/flytectl/docs/source/gen/flytectl_register_examples.rst
new file mode 100644
index 0000000000..9c681548d1
--- /dev/null
+++ b/flytectl/docs/source/gen/flytectl_register_examples.rst
@@ -0,0 +1,125 @@
+.. _flytectl_register_examples:
+
+flytectl register examples
+--------------------------
+
+Registers Flytesnacks example.
+
+Synopsis
+~~~~~~~~
+
+
+
+Register all the latest Flytesnacks examples:
+::
+
+ flytectl register examples -d development -p flytesnacks
+
+Register specific release of Flytesnacks examples:
+::
+
+ flytectl register examples -d development -p flytesnacks --version v0.2.176
+
+.. note::
+ The register command automatically overrides the version with the release version.
+
+Usage
+
+
+::
+
+ flytectl register examples [flags]
+
+Options
+~~~~~~~
+
+::
+
+      --archive Pass in archive file either an http link or local path.
+      --assumableIamRole string Custom assumable iam auth role to register launch plans with.
+      --continueOnError Continue on error when registering files.
+      --destinationDirectory string Location of source code in container.
+      --dryRun Execute command without making any modifications.
+      --enableSchedule Enable the schedule if the files contain schedulable launchplan.
+ --force Force use of version number on entities registered with flyte. + -h, --help help for examples + --k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --version string Version of the entity to be registered with flyte which are un-versioned after serialization. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_register` - Registers tasks, workflows, and launch plans from a list of generated serialized files. + diff --git a/flytectl/docs/source/gen/flytectl_register_files.rst b/flytectl/docs/source/gen/flytectl_register_files.rst new file mode 100644 index 0000000000..512b1166b6 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_register_files.rst @@ -0,0 +1,202 @@ +.. _flytectl_register_files: + +flytectl register files +----------------------- + +Registers file resources. + +Synopsis +~~~~~~~~ + + + +Registers all the serialized protobuf files including tasks, workflows and launch plans with default v1 version. + +If previously registered entities with v1 version are present, the command will fail immediately on the first such encounter. +:: + + flytectl register file _pb_output/* -d development -p flytesnacks + +As per Flytectl, registration and fast registration mean the same! 
+
+In fast registration, the input provided by the user is fast serialized proto generated by pyflyte.
+When the user runs pyflyte with --fast flag, then pyflyte creates serialized proto and the source code archive file in the same directory.
+Flytectl finds the input file by searching for an archive file whose name starts with "fast" and has .tar.gz extension.
+If Flytectl finds any source code in users' input, it considers the registration as fast registration.
+
+SourceUploadPath is an optional flag. By default, Flytectl will create SourceUploadPath from your storage config.
+If s3, Flytectl will upload the code base to s3://{{DEFINE_BUCKET_IN_STORAGE_CONFIG}}/fast/{{VERSION}}-fast{{MD5_CREATED_BY_PYFLYTE}}.tar.gz.
+::
+
+ flytectl register file _pb_output/* -d development -p flytesnacks --version v2
+
+In case of fast registration, if the SourceUploadPath flag is defined, Flytectl will not use the default directory to upload the source code.
+Instead, it will override the destination path on the registration.
+::
+
+ flytectl register file _pb_output/* -d development -p flytesnacks --version v2 --SourceUploadPath="s3://dummy/fast"
+
+To register a .tgz or .tar file, use the --archive flag. They can be local or remote files served through http/https. 
+ +:: + + flytectl register files http://localhost:8080/_pb_output.tar -d development -p flytesnacks --archive + +Using local tgz file: + +:: + + flytectl register files _pb_output.tgz -d development -p flytesnacks --archive + +If you wish to continue executing registration on other files by ignoring the errors including the version conflicts, then send the continueOnError flag: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError + +Using short format of continueOnError flag: +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError + +Override the default version v1 using version string: +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --version v2 + +Changing the o/p format has no effect on the registration. The O/p is currently available only in table format: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError -o yaml + +Override IamRole during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --assumableIamRole "arn:aws:iam::123456789:role/dummy" + +Override Kubernetes service account during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --k8sServiceAccount "kubernetes-service-account" + +Override Output location prefix during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --outputLocationPrefix "s3://dummy/prefix" + +Override Destination dir of source code in container during registration: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --continueOnError --version v2 --destinationDirectory "/root" + +Enable schedule for the launchplans part of the serialized protobuf files: + +:: + + flytectl register file _pb_output/* -d development -p flytesnacks --version v2 --enableSchedule 
+ +Usage + + +:: + + flytectl register files [flags] + +Options +~~~~~~~ + +:: + + --archive Pass in archive file either an http link or local path. + --assumableIamRole string Custom assumable iam auth role to register launch plans with. + --continueOnError Continue on error when registering files. + --destinationDirectory string Location of source code in container. + --dryRun Execute command without making any modifications. + --enableSchedule Enable the schedule if the files contain schedulable launchplan. + --force Force use of version number on entities registered with flyte. + -h, --help help for files + --k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --version string Version of the entity to be registered with flyte which are un-versioned after serialization. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_register` - Registers tasks, workflows, and launch plans from a list of generated serialized files. 
+
diff --git a/flytectl/docs/source/gen/flytectl_sandbox.rst b/flytectl/docs/source/gen/flytectl_sandbox.rst
new file mode 100644
index 0000000000..8cc08fc448
--- /dev/null
+++ b/flytectl/docs/source/gen/flytectl_sandbox.rst
@@ -0,0 +1,127 @@
+.. _flytectl_sandbox:
+
+flytectl sandbox
+----------------
+
+Helps with sandbox interactions like start, teardown, status, and exec.
+
+Synopsis
+~~~~~~~~
+
+
+
+Flyte Sandbox is a fully standalone minimal environment for running Flyte.
+It provides a simplified way of running Flyte sandbox as a single Docker container locally.
+
+To create a sandbox cluster, run:
+::
+
+ flytectl sandbox start
+
+To remove a sandbox cluster, run:
+::
+
+ flytectl sandbox teardown
+
+To check the status of the sandbox container, run:
+::
+
+ flytectl sandbox status
+
+To execute commands inside the sandbox container, use exec:
+::
+
+ flytectl sandbox exec -- pwd
+
+For just printing the docker commands for bringing up the demo container:
+::
+
+ flytectl demo start --dryRun
+
+
+
+Options
+~~~~~~~
+
+::
+
+  -h, --help help for sandbox
+
+Options inherited from parent commands
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+      --admin.audience string Audience to use when initiating OAuth2 authorization requests.
+      --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret")
+      --admin.authorizationHeader string Custom metadata header to pass JWT
+      --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint
+      --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_sandbox_exec` - Executes non-interactive command inside the sandbox container +* :doc:`flytectl_sandbox_start` - Starts the Flyte sandbox cluster. 
+* :doc:`flytectl_sandbox_status` - Gets the status of the sandbox environment. +* :doc:`flytectl_sandbox_teardown` - Cleans up the sandbox environment + diff --git a/flytectl/docs/source/gen/flytectl_sandbox_exec.rst b/flytectl/docs/source/gen/flytectl_sandbox_exec.rst new file mode 100644 index 0000000000..f1f3c44600 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_sandbox_exec.rst @@ -0,0 +1,106 @@ +.. _flytectl_sandbox_exec: + +flytectl sandbox exec +--------------------- + +Executes non-interactive command inside the sandbox container + +Synopsis +~~~~~~~~ + + + +Run non-interactive commands inside the sandbox container and immediately return the output. +By default, "flytectl exec" is present in the /root directory inside the sandbox container. + +:: + + flytectl sandbox exec -- ls -al + +Usage + +:: + + flytectl sandbox exec [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for exec + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_sandbox` - Helps with sandbox interactions like start, teardown, status, and exec. 
+ diff --git a/flytectl/docs/source/gen/flytectl_sandbox_start.rst b/flytectl/docs/source/gen/flytectl_sandbox_start.rst new file mode 100644 index 0000000000..048b92b24e --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_sandbox_start.rst @@ -0,0 +1,175 @@ +.. _flytectl_sandbox_start: + +flytectl sandbox start +---------------------- + +Starts the Flyte sandbox cluster. + +Synopsis +~~~~~~~~ + + + +Flyte sandbox is a fully standalone minimal environment for running Flyte. +It provides a simplified way of running Flyte sandbox as a single Docker container locally. + +Starts the sandbox cluster without any source code: +:: + + flytectl sandbox start + +Mounts your source code repository inside the sandbox: + +:: + + flytectl sandbox start --source=$HOME/flyteorg/flytesnacks + +Runs a specific version of Flyte. Flytectl sandbox only supports Flyte version available in the Github release, https://github.com/flyteorg/flyte/tags. + +:: + + flytectl sandbox start --version=v0.14.0 + +.. note:: + Flytectl Sandbox is only supported for Flyte versions > v0.10.0. + +Runs the latest pre release of Flyte. +:: + + flytectl sandbox start --pre + +Note: The pre release flag will be ignored if the user passes the version flag. In that case, Flytectl will use a specific version. + +Specify a Flyte Sandbox compliant image with the registry. This is useful in case you want to use an image from your registry. +:: + + flytectl sandbox start --image docker.io/my-override:latest + +Note: If image flag is passed then Flytectl will ignore version and pre flags. + +Specify a Flyte Sandbox image pull policy. Possible pull policy values are Always, IfNotPresent, or Never: +:: + + flytectl sandbox start --image docker.io/my-override:latest --imagePullPolicy Always + +Start sandbox cluster passing environment variables. This can be used to pass docker specific env variables or flyte specific env variables. +eg : for passing timeout value in secs for the sandbox container use the following. 
+:: + + flytectl sandbox start --env FLYTE_TIMEOUT=700 + + +The DURATION can be a positive integer or a floating-point number, followed by an optional unit suffix:: +s - seconds (default) +m - minutes +h - hours +d - days +When no unit is used, it defaults to seconds. If the duration is set to zero, the associated timeout is disabled. + + +eg : for passing multiple environment variables +:: + + flytectl sandbox start --env USER=foo --env PASSWORD=bar + + +Usage + + +:: + + flytectl sandbox start [flags] + +Options +~~~~~~~ + +:: + + --dev Optional. Only start minio and postgres in the sandbox. + --disable-agent Optional. Disable the agent service. + --dryRun Optional. Only print the docker commands to bring up flyte sandbox/demo container.This will still call github api's to get the latest flyte release to use' + --env strings Optional. Provide Env variable in key=value format which can be passed to sandbox container. + --force Optional. Forcefully delete existing sandbox cluster if it exists. + -h, --help help for start + --image string Optional. Provide a fully qualified path to a Flyte compliant docker image. + --imagePullOptions.platform string Forces a specific platform's image to be pulled.' + --imagePullOptions.registryAuth string The base64 encoded credentials for the registry. + --imagePullPolicy ImagePullPolicy Optional. Defines the image pull behavior [Always/IfNotPresent/Never] (default Always) + --pre Optional. Pre release Version of flyte will be used for sandbox. + --source string deprecated, path of your source code, please build images with local daemon + --version string Version of flyte. Only supports flyte releases greater than v0.10.0 + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_sandbox` - Helps with sandbox interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_sandbox_status.rst b/flytectl/docs/source/gen/flytectl_sandbox_status.rst new file mode 100644 index 0000000000..abce271578 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_sandbox_status.rst @@ -0,0 +1,106 @@ +.. _flytectl_sandbox_status: + +flytectl sandbox status +----------------------- + +Gets the status of the sandbox environment. + +Synopsis +~~~~~~~~ + + + +Retrieves the status of the sandbox environment. Currently, Flyte sandbox runs as a local Docker container. + +Usage +:: + + flytectl sandbox status + + + +:: + + flytectl sandbox status [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for status + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_sandbox` - Helps with sandbox interactions like start, teardown, status, and exec. 
+ diff --git a/flytectl/docs/source/gen/flytectl_sandbox_teardown.rst b/flytectl/docs/source/gen/flytectl_sandbox_teardown.rst new file mode 100644 index 0000000000..c57c64b61a --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_sandbox_teardown.rst @@ -0,0 +1,106 @@ +.. _flytectl_sandbox_teardown: + +flytectl sandbox teardown +------------------------- + +Cleans up the sandbox environment + +Synopsis +~~~~~~~~ + + + +Removes the Sandbox cluster and all the Flyte config created by 'sandbox start': +:: + + flytectl sandbox teardown + + +Usage + + +:: + + flytectl sandbox teardown [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for teardown + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_sandbox` - Helps with sandbox interactions like start, teardown, status, and exec. + diff --git a/flytectl/docs/source/gen/flytectl_update.rst b/flytectl/docs/source/gen/flytectl_update.rst new file mode 100644 index 0000000000..ffc18b7101 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update.rst @@ -0,0 +1,112 @@ +.. _flytectl_update: + +flytectl update +--------------- + +Update Flyte resources e.g., project. + +Synopsis +~~~~~~~~ + + + +Provides subcommands to update Flyte resources, such as tasks, workflows, launch plans, executions, and projects. +Update Flyte resource; e.g., to activate a project: +:: + + flytectl update project -p flytesnacks --activate + + +Options +~~~~~~~ + +:: + + -h, --help help for update + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool +* :doc:`flytectl_update_cluster-resource-attribute` - Update matchable resources of cluster attributes +* :doc:`flytectl_update_execution` - Updates the execution status +* :doc:`flytectl_update_execution-cluster-label` - Update matchable resources of execution cluster label +* :doc:`flytectl_update_execution-queue-attribute` - Update matchable resources of execution queue attributes +* :doc:`flytectl_update_launchplan` - Updates launch plan status +* :doc:`flytectl_update_launchplan-meta` - Updates the launch plan metadata +* :doc:`flytectl_update_plugin-override` - Update matchable resources of plugin overrides +* :doc:`flytectl_update_project` - Update the characteristics of a project +* :doc:`flytectl_update_task-meta` - Update task metadata +* :doc:`flytectl_update_task-resource-attribute` - Update matchable resources of task attributes +* :doc:`flytectl_update_workflow-execution-config` - Updates matchable resources of workflow execution config +* :doc:`flytectl_update_workflow-meta` - Update workflow metadata + diff --git a/flytectl/docs/source/gen/flytectl_update_cluster-resource-attribute.rst b/flytectl/docs/source/gen/flytectl_update_cluster-resource-attribute.rst new file mode 100644 index 0000000000..55ea963f65 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_cluster-resource-attribute.rst @@ -0,0 +1,142 @@ +.. 
_flytectl_update_cluster-resource-attribute: + +flytectl update cluster-resource-attribute +------------------------------------------ + +Update matchable resources of cluster attributes + +Synopsis +~~~~~~~~ + + + +Update cluster resource attributes for given project and domain combination or additionally with workflow name. + +Updating to the cluster resource attribute is only available from a generated file. See the get section to generate this file. +It takes input for cluster resource attributes from the config file cra.yaml, +Example: content of cra.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + attributes: + foo: "bar" + buzz: "lightyear" + +:: + + flytectl update cluster-resource-attribute --attrFile cra.yaml + +Update cluster resource attribute for project and domain and workflow combination. This will take precedence over any other +resource attribute defined at project domain level. +This will completely overwrite any existing custom project, domain and workflow combination attributes. +It is preferable to do get and generate an attribute file if there is an existing attribute that is already set and then update it to have new values. +Refer to get cluster-resource-attribute section on how to generate this file. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + attributes: + foo: "bar" + buzz: "lightyear" + +:: + + flytectl update cluster-resource-attribute --attrFile cra.yaml + +Usage + + + +:: + + flytectl update cluster-resource-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for updating attribute for the resource type. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. 
+ -h, --help help for cluster-resource-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_execution-cluster-label.rst b/flytectl/docs/source/gen/flytectl_update_execution-cluster-label.rst new file mode 100644 index 0000000000..0117986578 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_execution-cluster-label.rst @@ -0,0 +1,135 @@ +.. _flytectl_update_execution-cluster-label: + +flytectl update execution-cluster-label +--------------------------------------- + +Update matchable resources of execution cluster label + +Synopsis +~~~~~~~~ + + + +Update execution cluster label for the given project and domain combination or additionally with workflow name. + +Updating to the execution cluster label is only available from a generated file. See the get section to generate this file. +It takes input for execution cluster label from the config file ecl.yaml +Example: content of ecl.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + value: foo + +:: + + flytectl update execution-cluster-label --attrFile ecl.yaml + +Update execution cluster label for project, domain, and workflow combination. This will take precedence over any other +execution cluster label defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. 
code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + value: foo + +:: + + flytectl update execution-cluster-label --attrFile ecl.yaml + +Usage + + + +:: + + flytectl update execution-cluster-label [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for updating attribute for the resource type. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for execution-cluster-label + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_execution-queue-attribute.rst b/flytectl/docs/source/gen/flytectl_update_execution-queue-attribute.rst new file mode 100644 index 0000000000..5b3f080c03 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_execution-queue-attribute.rst @@ -0,0 +1,146 @@ +.. _flytectl_update_execution-queue-attribute: + +flytectl update execution-queue-attribute +----------------------------------------- + +Update matchable resources of execution queue attributes + +Synopsis +~~~~~~~~ + + + +Update execution queue attributes for the given project and domain combination or additionally with workflow name. + +Updating the execution queue attribute is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing custom project, domain, and workflow combination attributes. 
+It is preferable to do get and generate an attribute file if there is an existing attribute that is already set and then update it to have new values. +Refer to get execution-queue-attribute section on how to generate this file +It takes input for execution queue attributes from the config file era.yaml, +Example: content of era.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + tags: + - foo + - bar + - buzz + - lightyear + +:: + + flytectl update execution-queue-attribute --attrFile era.yaml + +Update execution queue attribute for project, domain, and workflow combination. This will take precedence over any other +execution queue attribute defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + tags: + - foo + - bar + - buzz + - lightyear + +:: + + flytectl update execution-queue-attribute --attrFile era.yaml + +Usage + + + +:: + + flytectl update execution-queue-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for updating attribute for the resource type. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for execution-queue-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. 
It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. 
(default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. 
(default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. 
(default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_execution.rst b/flytectl/docs/source/gen/flytectl_update_execution.rst new file mode 100644 index 0000000000..373b625c9a --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_execution.rst @@ -0,0 +1,115 @@ +.. _flytectl_update_execution: + +flytectl update execution +------------------------- + +Updates the execution status + +Synopsis +~~~~~~~~ + + + +Activate an execution; and it shows up in the CLI and UI: +:: + + flytectl update execution -p flytesnacks -d development oeh94k9r2r --activate + +Archive an execution; and it is hidden from the CLI and UI: +:: + + flytectl update execution -p flytesnacks -d development oeh94k9r2r --archive + + +Usage + + +:: + + flytectl update execution [flags] + +Options +~~~~~~~ + +:: + + --activate activate execution. + --archive archive execution. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for execution + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. 
+ diff --git a/flytectl/docs/source/gen/flytectl_update_launchplan-meta.rst b/flytectl/docs/source/gen/flytectl_update_launchplan-meta.rst new file mode 100644 index 0000000000..8e28b948c3 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_launchplan-meta.rst @@ -0,0 +1,120 @@ +.. _flytectl_update_launchplan-meta: + +flytectl update launchplan-meta +------------------------------- + +Updates the launch plan metadata + +Synopsis +~~~~~~~~ + + + +Update the description on the launch plan: +:: + + flytectl update launchplan-meta -p flytesnacks -d development core.advanced.merge_sort.merge_sort --description "Mergesort example" + +Archiving launch plan named entity is not supported and would throw an error: +:: + + flytectl update launchplan-meta -p flytesnacks -d development core.advanced.merge_sort.merge_sort --archive + +Activating launch plan named entity would be a noop: +:: + + flytectl update launchplan-meta -p flytesnacks -d development core.advanced.merge_sort.merge_sort --activate + +Usage + + +:: + + flytectl update launchplan-meta [flags] + +Options +~~~~~~~ + +:: + + --activate activate the named entity. + --archive archive named entity. + --description string description of the named entity. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for launchplan-meta + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. 
It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. 
(default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. 
(default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. 
(default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_launchplan.rst b/flytectl/docs/source/gen/flytectl_update_launchplan.rst new file mode 100644 index 0000000000..bb9992861d --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_launchplan.rst @@ -0,0 +1,116 @@ +.. _flytectl_update_launchplan: + +flytectl update launchplan +-------------------------- + +Updates launch plan status + +Synopsis +~~~~~~~~ + + + +Activates a `launch plan `__ which activates the scheduled job associated with it: +:: + + flytectl update launchplan -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --version v1 --activate + +Deactivates a `launch plan `__ which deschedules any scheduled job associated with it: +:: + + flytectl update launchplan -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --version v1 --deactivate + +Usage + + +:: + + flytectl update launchplan [flags] + +Options +~~~~~~~ + +:: + + --activate activate launchplan. + --archive (Deprecated) disable the launch plan schedule (if it has an active schedule associated with it). + --deactivate disable the launch plan schedule (if it has an active schedule associated with it). + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for launchplan + --version string version of the launchplan to be fetched. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_plugin-override.rst b/flytectl/docs/source/gen/flytectl_update_plugin-override.rst new file mode 100644 index 0000000000..95f744f85e --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_plugin-override.rst @@ -0,0 +1,148 @@ +.. _flytectl_update_plugin-override: + +flytectl update plugin-override +------------------------------- + +Update matchable resources of plugin overrides + +Synopsis +~~~~~~~~ + + + +Update plugin overrides for given project and domain combination or additionally with workflow name. + +Updating to the plugin override is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing plugins overrides on custom project, domain, and workflow combination. +It is preferable to do get and generate a plugin override file if there is an existing override already set and then update it to have new values. +Refer to get plugin-override section on how to generate this file +It takes input for plugin overrides from the config file po.yaml, +Example: content of po.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. 
+ - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT + +:: + + flytectl update plugin-override --attrFile po.yaml + +Update plugin override for project, domain, and workflow combination. This will take precedence over any other +plugin overrides defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + overrides: + - task_type: python_task # Task type for which to apply plugin implementation overrides + plugin_id: # Plugin id(s) to be used in place of the default for the task type. + - plugin_override1 + - plugin_override2 + missing_plugin_behavior: 1 # Behavior when no specified plugin_id has an associated handler. 0 : FAIL , 1: DEFAULT + +:: + + flytectl update plugin-override --attrFile po.yaml + +Usage + + + +:: + + flytectl update plugin-override [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for updating attribute for the resource type. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for plugin-override + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. 
+ diff --git a/flytectl/docs/source/gen/flytectl_update_project.rst b/flytectl/docs/source/gen/flytectl_update_project.rst new file mode 100644 index 0000000000..5d27380478 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_project.rst @@ -0,0 +1,186 @@ +.. _flytectl_update_project: + +flytectl update project +----------------------- + +Update the characteristics of a project + +Synopsis +~~~~~~~~ + + + +Allows you to update the characteristics of a project, including its name, labels and description. +Also allows you to archive or activate (unarchive) a project. + +To archive a project, specify its ID with the *p* flag and add the *archive* flag: + +:: + + flytectl update project -p my-project-id --archive + +To activate (unarchive) an archived project, specify its ID with the *p* flag and add the *activate* flag: + +:: + + flytectl update project -p my-project-id --activate + +To update the characteristics of a project using flags, specify the project ID with the *p* flag and the flags corresponding to the characteristics you want to update: + +:: + + flytectl update project -p my-project-id --description "A wonderful project" --labels app=my-app + +To update the characteristics of a project using a *yaml* file, define the file with the project ID desired updates: + +.. code-block:: yaml + + id: "my-project-id" + name: "my-project-name" + labels: + values: + app: my-app + description: "A wonderful project" + + +(Note: The name parameter must not contain whitespace) + +Then, pass it in using the *file* flag: + +:: + + flytectl update project --file project.yaml + +To archive or activate (unarchive) a project using a *yaml* file: + +* Add a state field, with a value of *0* for activated (unarchived) or *1* for archived, at the top level of the the *yaml* file. + +* Add the *archive* flag to the command. + +For example, to archive a project: + +.. 
code-block:: yaml + + # update.yaml + id: "my-project-id" + state: 1 + +:: + + $ uctl update project --file update.yaml --archive + +And to activate (unarchive) the same project: + +.. code-block:: yaml + + # update.yaml + id: "my-project-id" + state: 0 + +:: + + $ uctl update project --file update.yaml --archive + +Note that when using a *yaml* file, the *activate* flag is not used. +Instead, the *archive* flag is used for *both* archiving and activating (unarchiving) with the difference being in the *state* field of the *yaml* file. +Furthermore, the *state* field only takes effect if the *archive* flag is present in the command. + +Usage + + +:: + + flytectl update project [flags] + +Options +~~~~~~~ + +:: + + --activate Activates the project specified as argument. Only used in update + --activateProject (Deprecated) Activates the project specified as argument. Only used in update + --archive Archives the project specified as argument. Only used in update + --archiveProject (Deprecated) Archives the project specified as argument. Only used in update + --description string description for the project specified as argument. + --dryRun execute command without making any modifications. + --file string file for the project definition. + --force Skips asking for an acknowledgement during an update operation. Only used in update + -h, --help help for project + --id string id for the project specified as argument. + --labels stringToString labels for the project specified as argument. (default []) + --name string name for the project specified as argument. + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_task-meta.rst b/flytectl/docs/source/gen/flytectl_update_task-meta.rst new file mode 100644 index 0000000000..61312af748 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_task-meta.rst @@ -0,0 +1,120 @@ +.. _flytectl_update_task-meta: + +flytectl update task-meta +------------------------- + +Update task metadata + +Synopsis +~~~~~~~~ + + + +Update the description on the task: +:: + + flytectl update task-meta -d development -p flytesnacks core.control_flow.merge_sort.merge --description "Merge sort example" + +Archiving task named entity is not supported and would throw an error: +:: + + flytectl update task-meta -d development -p flytesnacks core.control_flow.merge_sort.merge --archive + +Activating task named entity would be a noop since archiving is not possible: +:: + + flytectl update task-meta -d development -p flytesnacks core.control_flow.merge_sort.merge --activate + +Usage + + +:: + + flytectl update task-meta [flags] + +Options +~~~~~~~ + +:: + + --activate activate the named entity. + --archive archive named entity. + --description string description of the named entity. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. 
+ -h, --help help for task-meta + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_task-resource-attribute.rst b/flytectl/docs/source/gen/flytectl_update_task-resource-attribute.rst new file mode 100644 index 0000000000..0ef4798aab --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_task-resource-attribute.rst @@ -0,0 +1,148 @@ +.. _flytectl_update_task-resource-attribute: + +flytectl update task-resource-attribute +--------------------------------------- + +Update matchable resources of task attributes + +Synopsis +~~~~~~~~ + + + +Updates the task resource attributes for the given project and domain combination or additionally with workflow name. + +Updating the task resource attribute is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing custom project, domain, and workflow combination attributes. +It is preferable to do get and generate an attribute file if there is an existing attribute already set and then update it to have new values. +Refer to get task-resource-attribute section on how to generate this file. +It takes input for task resource attributes from the config file tra.yaml, +Example: content of tra.yaml: + +.. 
code-block:: yaml + + domain: development + project: flytesnacks + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +:: + + flytectl update task-resource-attribute --attrFile tra.yaml + +Update task resource attribute for project, domain, and workflow combination. This will take precedence over any other +resource attribute defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + defaults: + cpu: "1" + memory: "150Mi" + limits: + cpu: "2" + memory: "450Mi" + +:: + + flytectl update task-resource-attribute --attrFile tra.yaml + +Usage + + + +:: + + flytectl update task-resource-attribute [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for updating attribute for the resource type. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for task-resource-attribute + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. 
+ diff --git a/flytectl/docs/source/gen/flytectl_update_workflow-execution-config.rst b/flytectl/docs/source/gen/flytectl_update_workflow-execution-config.rst new file mode 100644 index 0000000000..b025df8a57 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_workflow-execution-config.rst @@ -0,0 +1,144 @@ +.. _flytectl_update_workflow-execution-config: + +flytectl update workflow-execution-config +----------------------------------------- + +Updates matchable resources of workflow execution config + +Synopsis +~~~~~~~~ + + + +Updates the workflow execution config for the given project and domain combination or additionally with workflow name. + +Updating the workflow execution config is only available from a generated file. See the get section for generating this file. +This will completely overwrite any existing custom project and domain and workflow combination execution config. +It is preferable to do get and generate a config file if there is an existing execution config already set and then update it to have new values. +Refer to get workflow-execution-config section on how to generate this file. +It takes input for workflow execution config from the config file wec.yaml, +Example: content of wec.yaml: + +.. code-block:: yaml + + domain: development + project: flytesnacks + max_parallelism: 5 + security_context: + run_as: + k8s_service_account: demo + +:: + + flytectl update workflow-execution-config --attrFile wec.yaml + +Update workflow execution config for project, domain, and workflow combination. This will take precedence over any other +execution config defined at project domain level. +For workflow 'core.control_flow.merge_sort.merge_sort' in flytesnacks project, development domain, it is: + +.. 
code-block:: yaml + + domain: development + project: flytesnacks + workflow: core.control_flow.merge_sort.merge_sort + max_parallelism: 5 + security_context: + run_as: + k8s_service_account: mergesortsa + +:: + + flytectl update workflow-execution-config --attrFile wec.yaml + +Usage + + + +:: + + flytectl update workflow-execution-config [flags] + +Options +~~~~~~~ + +:: + + --attrFile string attribute file name to be used for updating attribute for the resource type. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for workflow-execution-config + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. 
(default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. 
+ --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. 
+ --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. + diff --git a/flytectl/docs/source/gen/flytectl_update_workflow-meta.rst b/flytectl/docs/source/gen/flytectl_update_workflow-meta.rst new file mode 100644 index 0000000000..aadccfabd2 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_update_workflow-meta.rst @@ -0,0 +1,120 @@ +.. 
_flytectl_update_workflow-meta: + +flytectl update workflow-meta +----------------------------- + +Update workflow metadata + +Synopsis +~~~~~~~~ + + + +Update the description on the workflow: +:: + + flytectl update workflow-meta -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --description "Mergesort workflow example" + +Archiving workflow named entity would cause this to disappear from flyteconsole UI: +:: + + flytectl update workflow-meta -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --archive + +Activate workflow named entity: +:: + + flytectl update workflow-meta -p flytesnacks -d development core.control_flow.merge_sort.merge_sort --activate + +Usage + + +:: + + flytectl update workflow-meta [flags] + +Options +~~~~~~~ + +:: + + --activate activate the named entity. + --archive archive named entity. + --description string description of the named entity. + --dryRun execute command without making any modifications. + --force do not ask for an acknowledgement during updates. + -h, --help help for workflow-meta + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. 
+ --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. 
It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. 
NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl_update` - Update Flyte resources e.g., project. 
+ diff --git a/flytectl/docs/source/gen/flytectl_upgrade.rst b/flytectl/docs/source/gen/flytectl_upgrade.rst new file mode 100644 index 0000000000..a0bcedda9d --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_upgrade.rst @@ -0,0 +1,114 @@ +.. _flytectl_upgrade: + +flytectl upgrade +---------------- + +Upgrades/rollbacks to a Flyte version. + +Synopsis +~~~~~~~~ + + + +For Flytectl, it is: +:: + + flytectl upgrade + +.. note:: + Please upgrade with sudo. Failing to do so may result in permission issues. + +Rollback Flytectl binary: +:: + + flytectl upgrade rollback + +.. note:: + Upgrade is not available on Windows. + + +:: + + flytectl upgrade [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for upgrade + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. + --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. 
Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. Caution : shouldn't be use for production usecases' + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. 
+ --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. + --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. 
+ --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool + diff --git a/flytectl/docs/source/gen/flytectl_version.rst b/flytectl/docs/source/gen/flytectl_version.rst new file mode 100644 index 0000000000..220a375741 --- /dev/null +++ b/flytectl/docs/source/gen/flytectl_version.rst @@ -0,0 +1,103 @@ +.. _flytectl_version: + +flytectl version +---------------- + +Fetches Flyte version + +Synopsis +~~~~~~~~ + + + +Fetch Flytectl version. +:: + + flytectl version + + +:: + + flytectl version [flags] + +Options +~~~~~~~ + +:: + + -h, --help help for version + +Options inherited from parent commands +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + --admin.audience string Audience to use when initiating OAuth2 authorization requests. 
+ --admin.authType string Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values (default "ClientSecret") + --admin.authorizationHeader string Custom metadata header to pass JWT + --admin.authorizationServerUrl string This is the URL to your IdP's authorization server. It'll default to Endpoint + --admin.caCertFilePath string Use specified certificate file to verify the admin server peer. + --admin.clientId string Client ID (default "flytepropeller") + --admin.clientSecretEnvVar string Environment variable containing the client secret + --admin.clientSecretLocation string File containing the client secret (default "/etc/secrets/client_secret") + --admin.command strings Command for external authentication token generation + --admin.defaultServiceConfig string + --admin.deviceFlowConfig.pollInterval string amount of time the device flow would poll the token endpoint if auth server doesn't return a polling interval. Okta and google IDP do return an interval' (default "5s") + --admin.deviceFlowConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.deviceFlowConfig.timeout string amount of time the device flow should complete or else it will be cancelled. (default "10m0s") + --admin.endpoint string For admin types, specify where the uri of the service is located. + --admin.httpProxyURL string OPTIONAL: HTTP Proxy to be used for OAuth requests. + --admin.insecure Use insecure connection. + --admin.insecureSkipVerify InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name. 
Caution : shouldn't be used for production use cases + --admin.maxBackoffDelay string Max delay for grpc backoff (default "8s") + --admin.maxRetries int Max number of gRPC retries (default 4) + --admin.perRetryTimeout string gRPC per retry timeout (default "15s") + --admin.pkceConfig.refreshTime string grace period from the token expiry after which it would refresh the token. (default "5m0s") + --admin.pkceConfig.timeout string Amount of time the browser session would be active for authentication from client app. (default "2m0s") + --admin.scopes strings List of scopes to request + --admin.tokenRefreshWindow string Max duration between token refresh attempt and token expiry. (default "0s") + --admin.tokenUrl string OPTIONAL: Your IdP's token endpoint. It'll be discovered from flyte admin's OAuth Metadata endpoint if not provided. + --admin.useAudienceFromAdmin Use Audience configured from admins public endpoint config. + --admin.useAuth Deprecated: Auth will be enabled/disabled based on admin's dynamically discovered information. + -c, --config string config file (default is $HOME/.flyte/config.yaml) + --console.endpoint string Endpoint of console, if different than flyte admin + -d, --domain string Specifies the Flyte project's domain. + --files.archive Pass in archive file either an http link or local path. + --files.assumableIamRole string Custom assumable iam auth role to register launch plans with. + --files.continueOnError Continue on error when registering files. + --files.destinationDirectory string Location of source code in container. + --files.dryRun Execute command without making any modifications. + --files.enableSchedule Enable the schedule if the files contain schedulable launchplan. + --files.force Force use of version number on entities registered with flyte. + --files.k8ServiceAccount string Deprecated. Please use --K8sServiceAccount + --files.k8sServiceAccount string Custom kubernetes service account auth role to register launch plans with. 
+ --files.outputLocationPrefix string Custom output location prefix for offloaded types (files/schemas). + --files.sourceUploadPath string Deprecated: Update flyte admin to avoid having to configure storage access from flytectl. + --files.version string Version of the entity to be registered with flyte which are un-versioned after serialization. + --logger.formatter.type string Sets logging format type. (default "json") + --logger.level int Sets the minimum logging level. (default 3) + --logger.mute Mutes all logs regardless of severity. Intended for benchmarks/tests only. + --logger.show-source Includes source code location in logs. + -o, --output string Specifies the output type - supported formats [TABLE JSON YAML DOT DOTURL]. NOTE: dot, doturl are only supported for Workflow (default "TABLE") + -p, --project string Specifies the Flyte project. + --storage.cache.max_size_mbs int Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used + --storage.cache.target_gc_percent int Sets the garbage collection target percentage. + --storage.connection.access-key string Access key to use. Only required when authtype is set to accesskey. + --storage.connection.auth-type string Auth Type to use [iam, accesskey]. (default "iam") + --storage.connection.disable-ssl Disables SSL connection. Should only be used for development. + --storage.connection.endpoint string URL for storage client to connect to. + --storage.connection.region string Region to connect to. (default "us-east-1") + --storage.connection.secret-key string Secret to use when accesskey is set. + --storage.container string Initial container (in s3 a bucket) to create -if it doesn't exist-.' + --storage.defaultHttpClient.timeout string Sets time out on the http client. (default "0s") + --storage.enable-multicontainer If this is true, then the container argument is overlooked and redundant. 
This config will automatically open new connections to new containers/buckets as they are encountered + --storage.limits.maxDownloadMBs int Maximum allowed download size (in MBs) per call. (default 2) + --storage.stow.config stringToString Configuration for stow backend. Refer to github/flyteorg/stow (default []) + --storage.stow.kind string Kind of Stow backend to use. Refer to github/flyteorg/stow + --storage.type string Sets the type of storage to configure [s3/minio/local/mem/stow]. (default "s3") + +SEE ALSO +~~~~~~~~ + +* :doc:`flytectl` - Flytectl CLI tool + diff --git a/flytectl/docs/source/generate_docs.go b/flytectl/docs/source/generate_docs.go new file mode 100644 index 0000000000..e64f2a5996 --- /dev/null +++ b/flytectl/docs/source/generate_docs.go @@ -0,0 +1,9 @@ +package main + +import "github.com/flyteorg/flytectl/cmd" + +func main() { + if err := cmd.GenerateDocs(); err != nil { + panic(err) + } +} diff --git a/flytectl/docs/source/index.rst b/flytectl/docs/source/index.rst new file mode 100644 index 0000000000..93ccc96622 --- /dev/null +++ b/flytectl/docs/source/index.rst @@ -0,0 +1,143 @@ +.. DO NOT EDIT THIS FILE! + This file is the index for the old flytekit documentation. The index for the monodocs is now + at `docs_index.rst`. Please edit that file if you want to add new entries to the flytekit api + documentation. + +.. flytectl doc + +#################################################### +Flytectl: The Official Flyte Command-line Interface +#################################################### + +Overview +========= +This video will take you on a tour of Flytectl - how to install and configure it, as well as how to use the Verbs and Nouns sections on the left hand side menu. Detailed information can be found in the sections below the video. + +.. youtube:: cV8ezYnBANE + + +Installation +============ + +Flytectl is a Golang binary that can be installed on any platform supported by Golang. + +.. tab-set:: + + .. tab-item:: OSX + + .. 
prompt:: bash $ + + brew install flyteorg/homebrew-tap/flytectl + + *Upgrade* existing installation using the following command: + + .. prompt:: bash $ + + flytectl upgrade + + .. tab-item:: Other Operating systems + + .. prompt:: bash $ + + curl -sL https://ctl.flyte.org/install | bash + + *Upgrade* existing installation using the following command: + + .. prompt:: bash $ + + flytectl upgrade + +**Test** if Flytectl is installed correctly (your Flytectl version should be > 0.2.0) using the following command: + +.. prompt:: bash $ + + flytectl version + +Configuration +============= + +Flytectl allows you to communicate with FlyteAdmin using a YAML file or by passing every configuration value +on the command-line. The following configuration can be used for the setup: + +Basic Configuration +-------------------- + +The full list of available configurable options can be found by running ``flytectl --help``, or `here `__. + +.. NOTE:: + + Currently, the Project ``-p``, Domain ``-d``, and Output ``-o`` flags cannot be used in the config file. + +.. tab-set:: + + .. tab-item:: Local Flyte Sandbox + + Automatically configured for you by ``flytectl sandbox`` command. + + .. code-block:: yaml + + admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///localhost:30081 + insecure: true # Set to false to enable TLS/SSL connection (not recommended except on local sandbox deployment). + authType: Pkce # authType: Pkce # if using authentication or just drop this. + + .. tab-item:: AWS Configuration + + .. code-block:: yaml + + admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:/// + authType: Pkce # authType: Pkce # if using authentication or just drop this. + insecure: true # insecure: True # Set to true if the endpoint isn't accessible through TLS/SSL connection (not recommended except on local sandbox deployment) + + .. tab-item:: GCS Configuration + + .. 
code-block:: yaml + + admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:/// + authType: Pkce # authType: Pkce # if using authentication or just drop this. + insecure: false # insecure: True # Set to true if the endpoint isn't accessible through TLS/SSL connection (not recommended except on local sandbox deployment) + + .. tab-item:: Others + + For other supported storage backends like Oracle, Azure, etc., refer to the configuration structure `here `__. + + Place the config file in ``$HOME/.flyte`` directory with the name config.yaml. + This file is typically searched in: + + * ``$HOME/.flyte`` + * currDir from where you run flytectl + * ``/etc/flyte/config`` + + You can also pass the file name in the command line using ``--config ``. + + +.. toctree:: + :maxdepth: 1 + :hidden: + + |plane| Getting Started + |book-reader| User Guide + |chalkboard| Tutorials + |project-diagram| Concepts + |rocket| Deployment + |book| API Reference + |hands-helping| Community + + +.. NOTE: the caption text is important for the sphinx theme to correctly render the nav header +.. https://github.com/flyteorg/furo +.. toctree:: + :maxdepth: -1 + :caption: Flytectl + :hidden: + + Flytectl + CLI Entrypoint + verbs + nouns + contribute diff --git a/flytectl/docs/source/launchplan.rst b/flytectl/docs/source/launchplan.rst new file mode 100644 index 0000000000..ad29e5229a --- /dev/null +++ b/flytectl/docs/source/launchplan.rst @@ -0,0 +1,11 @@ +Launchplan +----------- +It specifies the actions to be performed on the 'launchplan' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Launchplan + + gen/flytectl_get_launchplan + gen/flytectl_update_launchplan + gen/flytectl_update_launchplan-meta diff --git a/flytectl/docs/source/nouns.rst b/flytectl/docs/source/nouns.rst new file mode 100644 index 0000000000..150d3cab0c --- /dev/null +++ b/flytectl/docs/source/nouns.rst @@ -0,0 +1,26 @@ +.. 
_nouns: + +Nouns +------ +Flytectl nouns specify the resource on which the action needs to be performed. Examples of resources include project, workflow, task, and execution. + +.. toctree:: + :maxdepth: 1 + :caption: Nouns + + project + execution + workflow + task + task-resource-attribute + cluster-resource-attribute + execution-cluster-label + execution-queue-attribute + plugin-override + launchplan + workflow-execution-config + examples + files + config + sandbox + demo diff --git a/flytectl/docs/source/overview.rst b/flytectl/docs/source/overview.rst new file mode 100644 index 0000000000..aac4c99ee6 --- /dev/null +++ b/flytectl/docs/source/overview.rst @@ -0,0 +1,109 @@ +#################################################### +Flytectl: The Official Flyte Command-line Interface +#################################################### + +Overview +========= +This video will take you on a tour of Flytectl - how to install and configure it, as well as how to use the Verbs and Nouns sections on the left hand side menu. Detailed information can be found in the sections below the video. + +.. youtube:: cV8ezYnBANE + + +Installation +============ + +Flytectl is a Golang binary that can be installed on any platform supported by Golang. + +.. tab-set:: + + .. tab-item:: OSX + + .. prompt:: bash $ + + brew install flyteorg/homebrew-tap/flytectl + + *Upgrade* existing installation using the following command: + + .. prompt:: bash $ + + flytectl upgrade + + .. tab-item:: Other Operating systems + + .. prompt:: bash $ + + curl -sL https://ctl.flyte.org/install | bash + + *Upgrade* existing installation using the following command: + + .. prompt:: bash $ + + flytectl upgrade + +**Test** if Flytectl is installed correctly (your Flytectl version should be > 0.2.0) using the following command: + +.. 
prompt:: bash $ + + flytectl version + +Configuration +============= + +Flytectl allows you to communicate with FlyteAdmin using a YAML file or by passing every configuration value +on the command-line. The following configuration can be used for the setup: + +Basic Configuration +-------------------- + +The full list of available configurable options can be found by running ``flytectl --help``, or `here `__. + +.. NOTE:: + + Currently, the Project ``-p``, Domain ``-d``, and Output ``-o`` flags cannot be used in the config file. + +.. tab-set:: + + .. tab-item:: Local Flyte Sandbox + + Automatically configured for you by ``flytectl sandbox`` command. + + .. code-block:: yaml + + admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///localhost:30081 + insecure: true # Set to false to enable TLS/SSL connection (not recommended except on local sandbox deployment). + authType: Pkce # authType: Pkce # if using authentication or just drop this. + + .. tab-item:: AWS Configuration + + .. code-block:: yaml + + admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:/// + authType: Pkce # authType: Pkce # if using authentication or just drop this. + insecure: true # insecure: True # Set to true if the endpoint isn't accessible through TLS/SSL connection (not recommended except on local sandbox deployment) + + .. tab-item:: GCS Configuration + + .. code-block:: yaml + + admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:/// + authType: Pkce # authType: Pkce # if using authentication or just drop this. + insecure: false # insecure: True # Set to true if the endpoint isn't accessible through TLS/SSL connection (not recommended except on local sandbox deployment) + + .. tab-item:: Others + + For other supported storage backends like Oracle, Azure, etc., refer to the configuration structure `here `__. 
+ + Place the config file in ``$HOME/.flyte`` directory with the name config.yaml. + This file is typically searched in: + + * ``$HOME/.flyte`` + * currDir from where you run flytectl + * ``/etc/flyte/config`` + + You can also pass the file name in the command line using ``--config ``. diff --git a/flytectl/docs/source/plugin-override.rst b/flytectl/docs/source/plugin-override.rst new file mode 100644 index 0000000000..7b1010ff1a --- /dev/null +++ b/flytectl/docs/source/plugin-override.rst @@ -0,0 +1,11 @@ +Plugin override +--------------- +It specifies the actions to be performed on the 'plugin-override' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Plugin override + + gen/flytectl_get_plugin-override + gen/flytectl_update_plugin-override + gen/flytectl_delete_plugin-override diff --git a/flytectl/docs/source/project.rst b/flytectl/docs/source/project.rst new file mode 100644 index 0000000000..05e1cc1113 --- /dev/null +++ b/flytectl/docs/source/project.rst @@ -0,0 +1,12 @@ +Project +-------- +It specifies the actions to be performed on the 'project' resource. + + +.. toctree:: + :maxdepth: 1 + :caption: Project + + gen/flytectl_create_project + gen/flytectl_get_project + gen/flytectl_update_project diff --git a/flytectl/docs/source/sandbox.rst b/flytectl/docs/source/sandbox.rst new file mode 100644 index 0000000000..f7d7bbc4bd --- /dev/null +++ b/flytectl/docs/source/sandbox.rst @@ -0,0 +1,12 @@ +Sandbox +------- +It specifies the actions to be performed on the 'sandbox' resource. + +.. 
toctree:: + :maxdepth: 1 + :caption: Sandbox + + gen/flytectl_sandbox_start + gen/flytectl_sandbox_status + gen/flytectl_sandbox_teardown + gen/flytectl_sandbox_exec diff --git a/flytectl/docs/source/task-resource-attribute.rst b/flytectl/docs/source/task-resource-attribute.rst new file mode 100644 index 0000000000..edc168355a --- /dev/null +++ b/flytectl/docs/source/task-resource-attribute.rst @@ -0,0 +1,12 @@ +Task resource attribute +----------------------- +It specifies the actions to be performed on the 'task-resource-attribute' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Task resource attribute + + gen/flytectl_get_task-resource-attribute + gen/flytectl_update_task-resource-attribute + gen/flytectl_delete_task-resource-attribute + diff --git a/flytectl/docs/source/task.rst b/flytectl/docs/source/task.rst new file mode 100644 index 0000000000..8792ea31d4 --- /dev/null +++ b/flytectl/docs/source/task.rst @@ -0,0 +1,10 @@ +Task +------ +It specifies the actions to be performed on the 'task' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Task + + gen/flytectl_get_task + gen/flytectl_update_task-meta diff --git a/flytectl/docs/source/verbs.rst b/flytectl/docs/source/verbs.rst new file mode 100644 index 0000000000..403a96ec31 --- /dev/null +++ b/flytectl/docs/source/verbs.rst @@ -0,0 +1,21 @@ +Verbs +------ +Flytectl verbs specify the actions to be performed on the resources. Example: create, get, update, and delete. + + +.. 
toctree:: + :maxdepth: 1 + :caption: Verbs + + gen/flytectl_create + gen/flytectl_completion + gen/flytectl_get + gen/flytectl_update + gen/flytectl_delete + gen/flytectl_register + gen/flytectl_config + gen/flytectl_compile + gen/flytectl_sandbox + gen/flytectl_demo + gen/flytectl_version + gen/flytectl_upgrade diff --git a/flytectl/docs/source/workflow-execution-config.rst b/flytectl/docs/source/workflow-execution-config.rst new file mode 100644 index 0000000000..d4f94e76a9 --- /dev/null +++ b/flytectl/docs/source/workflow-execution-config.rst @@ -0,0 +1,12 @@ +Workflow execution config +------------------------- + +It specifies the actions to be performed on the 'workflow-execution-config' resource. + +.. toctree:: + :maxdepth: 1 + :caption: Workflow execution config + + gen/flytectl_get_workflow-execution-config + gen/flytectl_update_workflow-execution-config + gen/flytectl_delete_workflow-execution-config diff --git a/flytectl/docs/source/workflow.rst b/flytectl/docs/source/workflow.rst new file mode 100644 index 0000000000..32c08574a3 --- /dev/null +++ b/flytectl/docs/source/workflow.rst @@ -0,0 +1,10 @@ +Workflow +-------- +It specifies the actions to be performed on the 'workflow' resource. + +.. 
toctree:: + :maxdepth: 1 + :caption: Workflow + + gen/flytectl_get_workflow + gen/flytectl_update_workflow-meta diff --git a/flytectl/go.mod b/flytectl/go.mod new file mode 100644 index 0000000000..9e492aa0ec --- /dev/null +++ b/flytectl/go.mod @@ -0,0 +1,178 @@ +module github.com/flyteorg/flytectl + +go 1.19 + +require ( + github.com/apoorvam/goterminal v0.0.0-20180523175556-614d345c47e5 + github.com/avast/retry-go v3.0.0+incompatible + github.com/awalterschulze/gographviz v2.0.3+incompatible + github.com/disiqueira/gotree v1.0.0 + github.com/docker/docker v20.10.7+incompatible + github.com/docker/go-connections v0.4.0 + github.com/enescakir/emoji v1.0.0 + github.com/flyteorg/flyte/flyteidl v1.9.12 + github.com/flyteorg/flyte/flytepropeller v1.9.12 + github.com/flyteorg/flyte/flytestdlib v1.9.12 + github.com/go-ozzo/ozzo-validation/v4 v4.3.0 + github.com/golang/protobuf v1.5.3 + github.com/google/go-cmp v0.5.9 + github.com/google/go-github/v42 v42.0.0 + github.com/google/uuid v1.3.0 + github.com/hashicorp/go-version v1.3.0 + github.com/hexops/gotextdiff v1.0.3 + github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23 + github.com/landoop/tableprinter v0.0.0-20201125135848-89e81fc956e7 + github.com/mitchellh/mapstructure v1.5.0 + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 + github.com/mouuff/go-rocket-update v1.5.1 + github.com/opencontainers/image-spec v1.0.2 + github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/cobra v1.4.0 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.8.4 + github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 + github.com/zalando/go-keyring v0.1.1 + golang.org/x/oauth2 v0.7.0 + golang.org/x/text v0.9.0 + google.golang.org/grpc v1.56.1 + google.golang.org/protobuf v1.30.0 + gopkg.in/yaml.v3 v3.0.1 + gotest.tools v2.2.0+incompatible + k8s.io/api v0.24.1 + k8s.io/apimachinery v0.24.1 + k8s.io/client-go v0.24.1 
+ sigs.k8s.io/yaml v1.3.0 +) + +require ( + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v0.13.0 // indirect + cloud.google.com/go/storage v1.28.1 // indirect + github.com/Azure/azure-sdk-for-go v63.4.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.27 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Microsoft/go-winio v0.5.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 // indirect + github.com/aws/aws-sdk-go v1.44.2 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/charmbracelet/bubbles v0.18.0 // indirect + github.com/charmbracelet/bubbletea v0.25.0 // indirect + github.com/charmbracelet/lipgloss v0.10.0 // indirect + github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect + github.com/containerd/containerd v1.5.10 // indirect + github.com/coocood/freecache v1.1.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/danieljoos/wincred v1.1.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/docker/distribution 
v2.8.0+incompatible // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/flyteorg/flyte/flyteplugins v0.0.0-00010101000000-000000000000 // indirect + github.com/flyteorg/stow v0.3.7 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/godbus/dbus/v5 v5.0.4 // indirect + github.com/gofrs/uuid v4.2.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.4.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + 
github.com/magiconair/properties v1.8.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-localereader v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect + github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.15.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/ncw/swift v1.0.53 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/viper v1.11.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sync v0.1.0 // indirect + 
golang.org/x/sys v0.12.0 // indirect + golang.org/x/term v0.7.0 // indirect + golang.org/x/time v0.1.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.114.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect +) + +replace ( + github.com/flyteorg/flyte/flyteidl => github.com/flyteorg/flyte/flyteidl v1.9.12 + github.com/flyteorg/flyte/flyteplugins => github.com/flyteorg/flyte/flyteplugins v1.9.12 + github.com/flyteorg/flyte/flytepropeller => github.com/flyteorg/flyte/flytepropeller v1.9.12 + github.com/flyteorg/flyte/flytestdlib => github.com/flyteorg/flyte/flytestdlib v1.9.12 +) diff --git a/flytectl/go.sum b/flytectl/go.sum new file mode 100644 index 0000000000..0d6318416f --- /dev/null +++ b/flytectl/go.sum @@ -0,0 +1,1495 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= 
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v63.4.0+incompatible h1:fle3M5Q7vr8auaiPffKyUQmLbvYeqpw30bKU6PrWJFo= +github.com/Azure/azure-sdk-for-go v63.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.1 h1:3CVsSo4mp8NDWO11tHzN/mdo2zP0CtaSK5IcwBjfqRA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.1/go.mod h1:w5pDIZuawUmY3Bj4tVx3Xb8KS96ToB0j315w9rqpAg0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.14.0 h1:NVS/4LOQfkBpk+B1VopIzv1ptmYeEskA8w/3K/w7vjo= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 h1:Px2KVERcYEg2Lv25AqC2hVr0xUWaq94wuEObLIkYzmA= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2/go.mod h1:CdSJQNNzZhCkwDaV27XV1w48ZBPtxe7mlrZAsPNxD5g= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.0 h1:0nJeKDmB7a1a8RDMjTltahlPsaNlWjq/LpkZleSwINk= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.0/go.mod h1:mbwxKc/fW+IkF0GG591MuXw0KuEQBDkeRoZ9vmVJPxg= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= 
+github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= +github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= 
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apoorvam/goterminal v0.0.0-20180523175556-614d345c47e5 h1:VYqcjykqpcq262cDxBAkAelSdg6HETkxgwzQRTS40Aw= +github.com/apoorvam/goterminal v0.0.0-20180523175556-614d345c47e5/go.mod h1:E7x8aDc3AQzDKjEoIZCt+XYheHk2OkP+p2UgeNjecH8= +github.com/armon/consul-api 
v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= +github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E= +github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.44.2 h1:5VBk5r06bgxgRKVaUtm1/4NT/rtrnH2E4cnAYv5zgQc= +github.com/aws/aws-sdk-go v1.44.2/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= 
+github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw= +github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM= +github.com/charmbracelet/bubbletea v0.25.0/go.mod h1:EN3QDR1T5ZdWmdfDzYcqOCAps45+QIJbLOBxmVNWNNg= +github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s= +github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod 
h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= 
+github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= +github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod 
h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod 
h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= +github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd 
v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/danieljoos/wincred v1.1.0 h1:3RNcEpBg4IhIChZdFRSdlQt1QjCp1sMAPIrOnm7Yf8g= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/disiqueira/gotree v1.0.0 h1:en5wk87n7/Jyk6gVME3cx3xN9KmUCstJ1IjHr4Se4To= +github.com/disiqueira/gotree v1.0.0/go.mod h1:7CwL+VWsWAU95DovkdRZAtA7YbtHwGk+tLV/kNi8niU= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0 
h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY= +github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go 
v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= +github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch 
v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/flyteorg/flyte/flyteidl v1.9.12 h1:9V3rng4g6wheLf3j4ISuT6YZR5NX2/wSqqbJiOBYt00= +github.com/flyteorg/flyte/flyteidl v1.9.12/go.mod h1:87ELgkbZ26Fz95zkUZZP3rFc5qi2KXBxlqZ/NBqRkWQ= +github.com/flyteorg/flyte/flyteplugins v1.9.12 h1:ceXJSePUchdqlGKx9Y1Yj6BnuXMO+PKMtTT4ongfdAk= +github.com/flyteorg/flyte/flyteplugins v1.9.12/go.mod h1:PCM5jdV3iASLgLcdHOj5bzkY5Bz4eBunll14jrJmJOQ= +github.com/flyteorg/flyte/flytepropeller v1.9.12 h1:8wkLq6nk6HMLCTwZrQzn6lc1f8qRmv7cuQKWoXJJPH8= +github.com/flyteorg/flyte/flytepropeller v1.9.12/go.mod h1:lTYcULg7WZXbz4OyCibNB3z3gv7Et1u1hfruHuD5mb4= +github.com/flyteorg/flyte/flytestdlib v1.9.12 h1:Rm4c6e+/G6yeW4wm/+A1pClasJtZolELJQyikuv5O1A= +github.com/flyteorg/flyte/flytestdlib v1.9.12/go.mod h1:ZnpzKetFifz05KvjX4/Au23m3gdTkYHnN/MvVZvvJYk= +github.com/flyteorg/stow v0.3.7 h1:Cx7j8/Ux6+toD5hp5fy++927V+yAcAttDeQAlUD/864= +github.com/flyteorg/stow v0.3.7/go.mod h1:5dfBitPM004dwaZdoVylVjxFT4GWAgI0ghAndhNUzCo= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= +github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod 
h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic v0.5.7-v3refs 
h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v42 v42.0.0 h1:YNT0FwjPrEysRkLIiKuEfSvBPCGKphW5aS5PxwaoLec= +github.com/google/go-github/v42 v42.0.0/go.mod h1:jgg/jvyI0YlDOM1/ps6XYh04HNQ3vKf0CVko62/EhRg= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jmespath/go-jmespath 
v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23 h1:M8exrBzuhWcU6aoHJlHWPe4qFjVKzkMGRal78f5jRRU= +github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23/go.mod h1:kBSna6b0/RzsOcOZf515vAXwSsXYusl2U7SA0XP09yI= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/landoop/tableprinter v0.0.0-20180806200924-8bd8c2576d27 h1:O664tckOIC4smyHDDJPXAh/YBYYc0Y1O8S5wmZDm3d8= +github.com/landoop/tableprinter v0.0.0-20180806200924-8bd8c2576d27/go.mod h1:f0X1c0za3TbET/rl5ThtCSel0+G3/yZ8iuU9BxnyVK0= +github.com/landoop/tableprinter v0.0.0-20201125135848-89e81fc956e7 h1:J6LE/95ZXKZLdAG5xF+FF+h+CEKF78+UN5ZV8VJSCCk= +github.com/landoop/tableprinter v0.0.0-20201125135848-89e81fc956e7/go.mod h1:f0X1c0za3TbET/rl5ThtCSel0+G3/yZ8iuU9BxnyVK0= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= +github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mouuff/go-rocket-update v1.5.1 h1:qGgUu/MP+aVQ63laEguRNimmNTPKs29xz0lZW6QRFaQ= +github.com/mouuff/go-rocket-update v1.5.1/go.mod h1:CnOyUYCxAJyC1g1mebSGC7gJysLTlX+RpxKgD1B0zLs= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34= +github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= +github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks= +github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod 
h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= 
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.4 
h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod 
h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.6/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey 
v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag 
v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= +github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod 
h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zalando/go-keyring v0.1.1 h1:w2V9lcx/Uj4l+dzAf1m9s+DJ1O8ROkEHnynonHjTcYE= +github.com/zalando/go-keyring v0.1.1/go.mod h1:OIC+OZ28XbmwFxU/Rp9V7eKzZjamBJwRzC8UFJH9+L8= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic 
v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.114.0 
h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= 
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
+gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= +k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.24.1 
h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= +k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= +k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod 
h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= +k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/flytectl/install.sh b/flytectl/install.sh new file mode 100755 index 0000000000..1110e4e737 --- /dev/null +++ b/flytectl/install.sh @@ -0,0 +1,393 @@ +#!/bin/sh +set -e +# Code generated by godownloader on 2021-10-24T11:29:26Z. DO NOT EDIT. +# + +usage() { + this=$1 + cat </dev/null +} +echoerr() { + echo "$@" 1>&2 +} +log_prefix() { + echo "$0" +} +_logp=6 +log_set_priority() { + _logp="$1" +} +log_priority() { + if test -z "$1"; then + echo "$_logp" + return + fi + [ "$1" -le "$_logp" ] +} +log_tag() { + case $1 in + 0) echo "emerg" ;; + 1) echo "alert" ;; + 2) echo "crit" ;; + 3) echo "err" ;; + 4) echo "warning" ;; + 5) echo "notice" ;; + 6) echo "info" ;; + 7) echo "debug" ;; + *) echo "$1" ;; + esac +} +log_debug() { + log_priority 7 || return 0 + echoerr "$(log_prefix)" "$(log_tag 7)" "$@" +} +log_info() { + log_priority 6 || return 0 + echoerr "$(log_prefix)" "$(log_tag 6)" "$@" +} +log_err() { + log_priority 3 || return 0 + echoerr "$(log_prefix)" "$(log_tag 3)" "$@" +} +log_crit() { + log_priority 2 || return 0 + echoerr "$(log_prefix)" "$(log_tag 2)" "$@" +} +uname_os() { + os=$(uname -s | tr '[:upper:]' '[:lower:]') + case "$os" in + cygwin_nt*) os="windows" ;; + mingw*) os="windows" ;; + msys_nt*) os="windows" ;; + esac + echo "$os" +} 
+uname_arch() { + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) arch="armv7" ;; + esac + echo ${arch} +} +uname_os_check() { + os=$(uname_os) + case "$os" in + darwin) return 0 ;; + dragonfly) return 0 ;; + freebsd) return 0 ;; + linux) return 0 ;; + android) return 0 ;; + nacl) return 0 ;; + netbsd) return 0 ;; + openbsd) return 0 ;; + plan9) return 0 ;; + solaris) return 0 ;; + windows) return 0 ;; + esac + log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib" + return 1 +} +uname_arch_check() { + arch=$(uname_arch) + case "$arch" in + 386) return 0 ;; + amd64) return 0 ;; + arm64) return 0 ;; + armv5) return 0 ;; + armv6) return 0 ;; + armv7) return 0 ;; + ppc64) return 0 ;; + ppc64le) return 0 ;; + mips) return 0 ;; + mipsle) return 0 ;; + mips64) return 0 ;; + mips64le) return 0 ;; + s390x) return 0 ;; + amd64p32) return 0 ;; + esac + log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. 
Please file bug report at https://github.com/client9/shlib" + return 1 +} +untar() { + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;; + *.tar) tar --no-same-owner -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} +http_download_curl() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url") + else + code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url") + fi + if [ "$code" != "200" ]; then + log_debug "http_download_curl received HTTP status $code" + return 1 + fi + return 0 +} +http_download_wget() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + wget -q -O "$local_file" "$source_url" + else + wget -q --header "$header" -O "$local_file" "$source_url" + fi +} +http_download() { + log_debug "http_download $2" + if is_command curl; then + http_download_curl "$@" + return + elif is_command wget; then + http_download_wget "$@" + return + fi + log_crit "http_download unable to find wget or curl" + return 1 +} +http_copy() { + tmp=$(mktemp) + http_download "${tmp}" "$1" "$2" || return 1 + body=$(cat "$tmp") + rm -f "${tmp}" + echo "$body" +} +github_release() { + owner_repo=$1 + version=$2 + test -z "$version" && version="latest" + giturl="https://github.com/${owner_repo}/releases/${version}" + json=$(http_copy "$giturl" "Accept:application/json") + test -z "$json" && return 1 + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + test -z "$version" && return 1 + echo "$version" +} +hash_sha256() { + TARGET=${1:-/dev/stdin} + if is_command gsha256sum; then + hash=$(gsha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command sha256sum; then + hash=$(sha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command shasum; then + 
hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command openssl; then + hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f a + else + log_crit "hash_sha256 unable to find command to compute sha-256 hash" + return 1 + fi +} +hash_sha256_verify() { + TARGET=$1 + checksums=$2 + if [ -z "$checksums" ]; then + log_err "hash_sha256_verify checksum file not specified in arg2" + return 1 + fi + BASENAME=${TARGET##*/} + want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) + if [ -z "$want" ]; then + log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" + return 1 + fi + got=$(hash_sha256 "$TARGET") + if [ "$want" != "$got" ]; then + log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" + return 1 + fi +} +cat /dev/null < 1 { + newMessages := getMessageList(firstBatchIndex - 1) + m.paginator.SetTotalPages(countTotalPages()) + firstBatchIndex-- + m.items = append(newMessages, m.items...) 
+ m.items = m.items[:len(m.items)-batchLen[lastBatchIndex]] // delete the msgs in the "lastBatchIndex" batch + localPageIndex += batchLen[firstBatchIndex] / msgPerPage + lastBatchIndex-- + return + } +} diff --git a/flytectl/pkg/commandutils/command_utils.go b/flytectl/pkg/commandutils/command_utils.go new file mode 100644 index 0000000000..2d9dfbf81c --- /dev/null +++ b/flytectl/pkg/commandutils/command_utils.go @@ -0,0 +1,22 @@ +package commandutils + +import ( + "bufio" + "fmt" + "io" + "strings" +) + +func AskForConfirmation(s string, reader io.Reader) bool { + fmt.Printf("%s [y/n]: ", s) + r := bufio.NewScanner(reader) + for r.Scan() { + response := strings.ToLower(strings.TrimSpace(r.Text())) + if response == "y" || response == "yes" { + return true + } else if response == "n" || response == "no" { + return false + } + } + return false +} diff --git a/flytectl/pkg/commandutils/command_utils_test.go b/flytectl/pkg/commandutils/command_utils_test.go new file mode 100644 index 0000000000..57fc4f82f6 --- /dev/null +++ b/flytectl/pkg/commandutils/command_utils_test.go @@ -0,0 +1,51 @@ +package commandutils + +import ( + "io" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestCase struct { + Input io.Reader `json:"input"` + Output bool `json:"output"` +} + +func TestAskForConfirmation(t *testing.T) { + tests := []TestCase{ + { + Input: strings.NewReader("yes"), + Output: true, + }, + { + Input: strings.NewReader("y"), + Output: true, + }, + { + Input: strings.NewReader("no"), + Output: false, + }, + { + Input: strings.NewReader("n"), + Output: false, + }, + { + Input: strings.NewReader("No"), + Output: false, + }, + { + Input: strings.NewReader("Yes"), + Output: true, + }, + { + Input: strings.NewReader(""), + Output: false, + }, + } + for _, test := range tests { + answer := AskForConfirmation("Testing for yes", test.Input) + assert.Equal(t, test.Output, answer) + } +} diff --git a/flytectl/pkg/configutil/configutil.go 
b/flytectl/pkg/configutil/configutil.go new file mode 100644 index 0000000000..3727b09084 --- /dev/null +++ b/flytectl/pkg/configutil/configutil.go @@ -0,0 +1,83 @@ +package configutil + +import ( + "html/template" + "os" + + f "github.com/flyteorg/flytectl/pkg/filesystemutils" +) + +const ( + AdminConfigTemplate = `admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: {{.Host}} + insecure: {{.Insecure}} +{{- if .Console}} +console: + endpoint: {{.Console}} +{{- end}} +{{- if .DataConfig}} +# This is not a needed configuration, only useful if you want to explore the data in sandbox. For non sandbox, please +# do not use this configuration, instead prefer to use aws, gcs, azure sessions. Flytekit, should use fsspec to +# auto select the right backend to pull data as long as the sessions are configured. For Sandbox, this is special, as +# minio is s3 compatible and we ship with minio in sandbox. +storage: + connection: + endpoint: {{.DataConfig.Endpoint}} + access-key: {{.DataConfig.AccessKey}} + secret-key: {{.DataConfig.SecretKey}} +{{- end}} +` +) + +type DataConfig struct { + Endpoint string + AccessKey string + SecretKey string +} + +type ConfigTemplateSpec struct { + Host string + Insecure bool + Console string + DataConfig *DataConfig +} + +var ( + FlytectlConfig = f.FilePathJoin(f.UserHomeDir(), ".flyte", "config-sandbox.yaml") + ConfigFile = f.FilePathJoin(f.UserHomeDir(), ".flyte", "config.yaml") + Kubeconfig = f.FilePathJoin(f.UserHomeDir(), ".flyte", "k3s", "k3s.yaml") +) + +// GetTemplate returns cluster config +func GetTemplate() string { + return AdminConfigTemplate +} + +// SetupConfig download the Flyte sandbox config +func SetupConfig(filename, templateStr string, templateSpec ConfigTemplateSpec) error { + tmpl := template.New("config") + tmpl, err := tmpl.Parse(templateStr) + if err != nil { + return err + } + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + return 
tmpl.Execute(file, templateSpec) +} + +// ConfigCleanup will remove the sandbox config from Flyte dir +func ConfigCleanup() error { + err := os.Remove(FlytectlConfig) + if err != nil { + return err + } + err = os.RemoveAll(f.FilePathJoin(f.UserHomeDir(), ".flyte", "k3s")) + if err != nil { + return err + } + return nil +} diff --git a/flytectl/pkg/configutil/configutil_test.go b/flytectl/pkg/configutil/configutil_test.go new file mode 100644 index 0000000000..a8f8bf4d96 --- /dev/null +++ b/flytectl/pkg/configutil/configutil_test.go @@ -0,0 +1,130 @@ +package configutil + +import ( + "io/ioutil" + "os" + "testing" + + f "github.com/flyteorg/flytectl/pkg/filesystemutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSetupConfig(t *testing.T) { + file, err := os.CreateTemp("", "*.yaml") + require.NoError(t, err) + + templateValue := ConfigTemplateSpec{ + Host: "dns:///localhost:30081", + Insecure: true, + } + err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) + assert.NoError(t, err) + configBytes, err := ioutil.ReadAll(file) + assert.NoError(t, err) + expected := `admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///localhost:30081 + insecure: true +` + assert.Equal(t, expected, string(configBytes)) + + file, err = os.Create(file.Name()) + require.NoError(t, err) + templateValue = ConfigTemplateSpec{ + Host: "dns:///admin.example.com", + Insecure: true, + Console: "https://console.example.com", + } + err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) + assert.NoError(t, err) + configBytes, err = ioutil.ReadAll(file) + assert.NoError(t, err) + expected = `admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///admin.example.com + insecure: true +console: + endpoint: https://console.example.com +` + assert.Equal(t, expected, string(configBytes)) + + file, err = os.Create(file.Name()) + require.NoError(t, 
err) + templateValue = ConfigTemplateSpec{ + Host: "dns:///admin.example.com", + Insecure: true, + DataConfig: &DataConfig{ + Endpoint: "http://localhost:9000", + AccessKey: "my-access-key", + SecretKey: "my-secret-key", + }, + } + err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) + assert.NoError(t, err) + configBytes, err = ioutil.ReadAll(file) + assert.NoError(t, err) + expected = `admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///admin.example.com + insecure: true +# This is not a needed configuration, only useful if you want to explore the data in sandbox. For non sandbox, please +# do not use this configuration, instead prefer to use aws, gcs, azure sessions. Flytekit, should use fsspec to +# auto select the right backend to pull data as long as the sessions are configured. For Sandbox, this is special, as +# minio is s3 compatible and we ship with minio in sandbox. +storage: + connection: + endpoint: http://localhost:9000 + access-key: my-access-key + secret-key: my-secret-key +` + assert.Equal(t, expected, string(configBytes)) + + // Cleanup + if file != nil { + _ = os.Remove(file.Name()) + } +} + +func TestConfigCleanup(t *testing.T) { + _, err := os.Stat(f.FilePathJoin(f.UserHomeDir(), ".flyte")) + if os.IsNotExist(err) { + _ = os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte"), 0755) + } + _ = ioutil.WriteFile(FlytectlConfig, []byte("string"), 0600) + _ = ioutil.WriteFile(Kubeconfig, []byte("string"), 0600) + + err = ConfigCleanup() + assert.Nil(t, err) + + _, err = os.Stat(FlytectlConfig) + check := os.IsNotExist(err) + assert.Equal(t, check, true) + + _, err = os.Stat(Kubeconfig) + check = os.IsNotExist(err) + assert.Equal(t, check, true) + _ = ConfigCleanup() +} + +func TestSetupFlytectlConfig(t *testing.T) { + templateValue := ConfigTemplateSpec{ + Host: "dns:///localhost:30081", + Insecure: true, + } + _, err := os.Stat(f.FilePathJoin(f.UserHomeDir(), ".flyte")) + if 
os.IsNotExist(err) { + _ = os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte"), 0755) + } + err = SetupConfig("version.yaml", AdminConfigTemplate, templateValue) + assert.Nil(t, err) + _, err = os.Stat("version.yaml") + assert.Nil(t, err) + check := os.IsNotExist(err) + assert.Equal(t, check, false) + _ = ConfigCleanup() +} + +func TestAwsConfig(t *testing.T) { + assert.Equal(t, AdminConfigTemplate, GetTemplate()) +} diff --git a/flytectl/pkg/configutil/version.yaml b/flytectl/pkg/configutil/version.yaml new file mode 100644 index 0000000000..2f492d603a --- /dev/null +++ b/flytectl/pkg/configutil/version.yaml @@ -0,0 +1,8 @@ +admin: + # For GRPC endpoints you might want to use dns:///flyte.myexample.com + endpoint: dns:///localhost:30081 + authType: Pkce + insecure: true +logger: + show-source: true + level: 0 \ No newline at end of file diff --git a/flytectl/pkg/docker/docker.go b/flytectl/pkg/docker/docker.go new file mode 100644 index 0000000000..9d3ccb4efe --- /dev/null +++ b/flytectl/pkg/docker/docker.go @@ -0,0 +1,69 @@ +package docker + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +//go:generate mockery -all -case=underscore + +type Docker interface { + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error + ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) + ContainerWait(ctx context.Context, containerID string, 
condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ImageList(ctx context.Context, listOption types.ImageListOptions) ([]types.ImageSummary, error) + ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) + CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) + VolumeList(ctx context.Context, filter filters.Args) (volume.VolumeListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error +} + +type FlyteDocker struct { + *client.Client +} + +//go:generate enumer -type=ImagePullPolicy -trimprefix=ImagePullPolicy --json +type ImagePullPolicy int + +const ( + ImagePullPolicyAlways ImagePullPolicy = iota + ImagePullPolicyIfNotPresent + ImagePullPolicyNever +) + +// Set implements PFlag's Value interface to attempt to set the value of the flag from string. +func (i *ImagePullPolicy) Set(val string) error { + policy, err := ImagePullPolicyString(val) + if err != nil { + return err + } + + *i = policy + return nil +} + +// Type implements PFlag's Value interface to return type name. 
+func (i ImagePullPolicy) Type() string { + return "ImagePullPolicy" +} + +type ImagePullOptions struct { + RegistryAuth string `json:"registryAuth" pflag:",The base64 encoded credentials for the registry."` + Platform string `json:"platform" pflag:",Forces a specific platform's image to be pulled.'"` +} diff --git a/flytectl/pkg/docker/docker_config.go b/flytectl/pkg/docker/docker_config.go new file mode 100644 index 0000000000..a3453d8012 --- /dev/null +++ b/flytectl/pkg/docker/docker_config.go @@ -0,0 +1,12 @@ +package docker + +// Config holds configuration flags for docker command. +var ( + DefaultConfig = &Config{ + Force: false, + } +) + +type Config struct { + Force bool `json:"force" pflag:",Optional. Forcefully delete existing sandbox cluster if it exists."` +} diff --git a/flytectl/pkg/docker/docker_util.go b/flytectl/pkg/docker/docker_util.go new file mode 100644 index 0000000000..e9315cab1e --- /dev/null +++ b/flytectl/pkg/docker/docker_util.go @@ -0,0 +1,401 @@ +package docker + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/client" + "github.com/enescakir/emoji" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/flyteorg/flytectl/clierrors" + "github.com/flyteorg/flytectl/cmd/config/subcommand/docker" + cmdUtil "github.com/flyteorg/flytectl/pkg/commandutils" + f "github.com/flyteorg/flytectl/pkg/filesystemutils" + "github.com/moby/term" +) + +var ( + FlyteSandboxConfigDir = f.FilePathJoin(f.UserHomeDir(), ".flyte", "sandbox") + Kubeconfig = f.FilePathJoin(FlyteSandboxConfigDir, "kubeconfig") + SandboxKubeconfig = f.FilePathJoin(f.UserHomeDir(), ".flyte", "k3s", "k3s.yaml") + 
SuccessMessage = "Deploying Flyte..." + FlyteSandboxClusterName = "flyte-sandbox" + FlyteSandboxVolumeName = "flyte-sandbox" + FlyteSandboxInternalDir = "/var/lib/flyte" + FlyteSandboxInternalConfigDir = f.FilePathJoin(FlyteSandboxInternalDir, "config") + FlyteSandboxInternalStorageDir = f.FilePathJoin(FlyteSandboxInternalDir, "storage") + Environment = []string{"SANDBOX=1", "KUBERNETES_API_PORT=30086", "FLYTE_HOST=localhost:30081", "FLYTE_AWS_ENDPOINT=http://localhost:30084"} + Source = "/root" + K3sDir = "/etc/rancher/" + Client Docker + Volumes = []mount.Mount{ + { + Type: mount.TypeBind, + Source: f.FilePathJoin(f.UserHomeDir(), ".flyte"), + Target: K3sDir, + }, + } + ExecConfig = types.ExecConfig{ + AttachStderr: true, + Tty: true, + WorkingDir: "/", + AttachStdout: true, + Cmd: []string{}, + } + StdWriterPrefixLen = 8 + StartingBufLen = 32*1024 + StdWriterPrefixLen + 1 + ExtraHosts = []string{"host.docker.internal:host-gateway"} +) + +// GetDockerClient will return the Docker client, creating one from the environment if none is set +func GetDockerClient() (Docker, error) { + if Client == nil { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + fmt.Printf("%v Please Check your docker client %v \n", emoji.GrimacingFace, emoji.Whale) + return nil, err + } + return cli, nil + } + return Client, nil +} + +// GetSandbox will return the sandbox container if it exists +func GetSandbox(ctx context.Context, cli Docker) (*types.Container, error) { + containers, err := cli.ContainerList(ctx, types.ContainerListOptions{ + All: true, + }) + if err != nil { + return nil, err + } + for _, v := range containers { + if strings.TrimLeft(v.Names[0], "/") == FlyteSandboxClusterName { + return &v, nil + } + } + return nil, nil +} + +// RemoveSandbox will remove the sandbox container if it exists +func RemoveSandbox(ctx context.Context, cli Docker, reader io.Reader) error { + c, err := GetSandbox(ctx, cli) + if err != nil { + return err + } + + if c != nil { + if
docker.DefaultConfig.Force || cmdUtil.AskForConfirmation("delete existing sandbox cluster", reader) { + err := cli.ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{ + Force: true, + }) + return err + } + return errors.New(clierrors.ErrSandboxExists) + } + return nil +} + +// GetDevPorts will return dev cluster (minio + postgres) ports +func GetDevPorts() (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, error) { + return nat.ParsePortSpecs([]string{ + "0.0.0.0:30082:30082", // K8s Dashboard Port + "0.0.0.0:30084:30084", // Minio API Port + "0.0.0.0:30086:30086", // K8s cluster + "0.0.0.0:30088:30088", // Minio Console Port + "0.0.0.0:30089:30089", // Postgres Port + }) +} + +// GetSandboxPorts will return sandbox ports +func GetSandboxPorts() (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, error) { + return nat.ParsePortSpecs([]string{ + // Notice that two host ports are mapped to the same container port in the case of Flyteconsole, this is done to + // support the generated URLs produced by pyflyte run + "0.0.0.0:30080:30081", // Flyteconsole Port. 
+ "0.0.0.0:30081:30081", // Flyteadmin Port + "0.0.0.0:30082:30082", // K8s Dashboard Port + "0.0.0.0:30084:30084", // Minio API Port + "0.0.0.0:30086:30086", // K8s cluster + "0.0.0.0:30088:30088", // Minio Console Port + "0.0.0.0:30089:30089", // Postgres Port + }) +} + +// GetDemoPorts will return demo ports +func GetDemoPorts() (map[nat.Port]struct{}, map[nat.Port][]nat.PortBinding, error) { + return nat.ParsePortSpecs([]string{ + "0.0.0.0:6443:6443", // K3s API Port + "0.0.0.0:30080:30080", // HTTP Port + "0.0.0.0:30000:30000", // Registry Port + "0.0.0.0:30001:30001", // Postgres Port + "0.0.0.0:30002:30002", // Minio API Port (use HTTP port for minio console) + "0.0.0.0:30003:30003", // Buildkit Port + }) +} + +// PullDockerImage will pull the docker image if required by the image pull policy +func PullDockerImage(ctx context.Context, cli Docker, image string, pullPolicy ImagePullPolicy, + imagePullOptions ImagePullOptions, dryRun bool) error { + + if dryRun { + PrintPullImage(image, imagePullOptions) + return nil + } + + var needsPull bool + if pullPolicy == ImagePullPolicyAlways { + needsPull = true + } else { + imageSummary, err := cli.ImageList(ctx, types.ImageListOptions{}) + if err != nil { + return err + } + found := false + for _, img := range imageSummary { + for _, tags := range img.RepoTags { + if image == tags { + found = true + break + } + } + if found { + break + } + } + needsPull = !found + } + + // Image already exists, nothing to do.
+ if !needsPull { + return nil + } + + // Image needs to be pulled but pull policy prevents it + if pullPolicy == ImagePullPolicyNever { + return fmt.Errorf("Image does not exist, but image pull policy prevents pulling it: %s", image) + } + + fmt.Printf("%v Pulling image %s\n", emoji.Whale, image) + r, err := cli.ImagePull(ctx, image, types.ImagePullOptions{ + RegistryAuth: imagePullOptions.RegistryAuth, + Platform: imagePullOptions.Platform, + }) + if err != nil { + return err + } + defer r.Close() + termFd, isTerm := term.GetFdInfo(os.Stderr) + return jsonmessage.DisplayJSONMessagesStream(r, os.Stderr, termFd, isTerm, nil) +} + +// PrintPullImage helper function to print the sandbox pull image command +func PrintPullImage(image string, pullOptions ImagePullOptions) { + fmt.Printf("%v Run the following command to pull the sandbox image from registry.\n", emoji.Sparkle) + var sb strings.Builder + sb.WriteString("docker pull ") + if len(pullOptions.Platform) > 0 { + sb.WriteString(fmt.Sprintf("--platform %v ", pullOptions.Platform)) + } + sb.WriteString(fmt.Sprintf("%v", image)) + fmt.Printf(" %v \n", sb.String()) +} + +// PrintRemoveContainer helper function to remove sandbox container +func PrintRemoveContainer(name string) { + fmt.Printf("%v Run the following command to remove the existing sandbox\n", emoji.Sparkle) + fmt.Printf(" docker container rm %v --force\n", name) +} + +// PrintCreateContainer helper function to print the docker command to run +func PrintCreateContainer(volumes []mount.Mount, portBindings map[nat.Port][]nat.PortBinding, name, image string, environment []string) { + var sb strings.Builder + fmt.Printf("%v Run the following command to create new sandbox container\n", emoji.Sparkle) + sb.WriteString(" docker create --privileged ") + for portProto, bindings := range portBindings { + srcPort := portProto.Port() + for _, binding := range bindings { + sb.WriteString(fmt.Sprintf("-p %v:%v:%v ", binding.HostIP, srcPort, binding.HostPort)) + } + } + 
for _, env := range environment { + sb.WriteString(fmt.Sprintf("--env %v ", env)) + } + + for _, volume := range volumes { + sb.WriteString(fmt.Sprintf("--mount type=%v,source=%v,target=%v ", volume.Type, volume.Source, volume.Target)) + } + sb.WriteString(fmt.Sprintf("--name %v ", name)) + sb.WriteString(fmt.Sprintf("%v", image)) + fmt.Printf("%v\n", sb.String()) + fmt.Printf("%v Run the following command to start the sandbox container\n", emoji.Sparkle) + fmt.Printf(" docker start %v\n", name) + fmt.Printf("%v Run the following command to check the logs and monitor the sandbox container and make sure there are no error during startup and then visit flyteconsole\n", emoji.EightSpokedAsterisk) + fmt.Printf(" docker logs -f %v\n", name) +} + +// StartContainer will create and start docker container +func StartContainer(ctx context.Context, cli Docker, volumes []mount.Mount, exposedPorts map[nat.Port]struct{}, + portBindings map[nat.Port][]nat.PortBinding, name, image string, additionalEnvVars []string, dryRun bool) (string, error) { + + // Append the additional env variables to the default list of env + Environment = append(Environment, additionalEnvVars...) + if dryRun { + PrintCreateContainer(volumes, portBindings, name, image, Environment) + return "", nil + } + fmt.Printf("%v Starting container... 
%v %v\n", emoji.FactoryWorker, emoji.Hammer, emoji.Wrench) + resp, err := cli.ContainerCreate(ctx, &container.Config{ + Env: Environment, + Image: image, + Tty: false, + ExposedPorts: exposedPorts, + }, &container.HostConfig{ + Mounts: volumes, + PortBindings: portBindings, + Privileged: true, + ExtraHosts: ExtraHosts, // add it because linux machine doesn't have this host name by default + }, nil, + nil, name) + + if err != nil { + return "", err + } + + if err := cli.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil { + return "", err + } + return resp.ID, nil +} + +// CopyContainerFile try to create the container, see if the source file is there, copy it to the destination +func CopyContainerFile(ctx context.Context, cli Docker, source, destination, name, image string) error { + resp, err := cli.ContainerCreate(ctx, &container.Config{Image: image}, &container.HostConfig{}, nil, nil, name) + if err != nil { + return err + } + var removeErr error + defer func() { + removeErr = cli.ContainerRemove(context.Background(), resp.ID, types.ContainerRemoveOptions{ + Force: true, + }) + }() + _, err = cli.ContainerStatPath(ctx, resp.ID, source) + if err != nil { + return err + } + reader, _, err := cli.CopyFromContainer(ctx, resp.ID, source) + if err != nil { + return err + } + tarFile := destination + ".tar" + outFile, err := os.Create(tarFile) + if err != nil { + return err + } + defer outFile.Close() + defer reader.Close() + _, err = io.Copy(outFile, reader) + if err != nil { + return err + } + r, _ := os.Open(tarFile) + err = f.ExtractTar(r, destination) + if err != nil { + return err + } + return removeErr +} + +// ReadLogs will return io scanner for reading the logs of a container +func ReadLogs(ctx context.Context, cli Docker, id string) (*bufio.Scanner, error) { + reader, err := cli.ContainerLogs(ctx, id, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }) + if err != 
nil { + return nil, err + } + return bufio.NewScanner(reader), nil +} + +// WaitForSandbox scans the log stream and returns true once the given success message is seen, or false if the stream ends first +func WaitForSandbox(reader *bufio.Scanner, message string) bool { + for reader.Scan() { + if strings.Contains(reader.Text(), message) { + return true + } + fmt.Println(reader.Text()) + } + return false +} + +// ExecCommend will execute a command in the container and return an execution ID +func ExecCommend(ctx context.Context, cli Docker, containerID string, command []string) (types.IDResponse, error) { + ExecConfig.Cmd = command + r, err := cli.ContainerExecCreate(ctx, containerID, ExecConfig) + if err != nil { + return types.IDResponse{}, err + } + return r, err +} + +func InspectExecResp(ctx context.Context, cli Docker, containerID string) error { + resp, err := cli.ContainerExecAttach(ctx, containerID, types.ExecStartCheck{}) + if err != nil { + return err + } + _, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader) + if err != nil { + return err + } + return nil +} + +func PrintCreateVolume(name string) { + fmt.Printf("%v Run the following command to create a volume\n", emoji.Sparkle) + fmt.Printf(" docker volume create %v\n", name) +} + +func GetOrCreateVolume( + ctx context.Context, cli Docker, volumeName string, dryRun bool, +) (*types.Volume, error) { + if dryRun { + PrintCreateVolume(volumeName) + return nil, nil + } + + resp, err := cli.VolumeList(ctx, filters.NewArgs( + filters.KeyValuePair{Key: "name", Value: fmt.Sprintf("^%s$", volumeName)}, + )) + if err != nil { + return nil, err + } + switch len(resp.Volumes) { + case 0: + v, err := cli.VolumeCreate(ctx, volume.VolumeCreateBody{Name: volumeName}) + if err != nil { + return nil, err + } + return &v, nil + case 1: + return resp.Volumes[0], nil + default: + // We don't expect to ever arrive at this point + return nil, fmt.Errorf("unexpected error - found multiple volumes with name: %s", volumeName) + } +} diff --git a/flytectl/pkg/docker/docker_util_test.go
b/flytectl/pkg/docker/docker_util_test.go new file mode 100644 index 0000000000..0de840bb7c --- /dev/null +++ b/flytectl/pkg/docker/docker_util_test.go @@ -0,0 +1,512 @@ +package docker + +import ( + "archive/tar" + "bufio" + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + "time" + + f "github.com/flyteorg/flytectl/pkg/filesystemutils" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + + "github.com/stretchr/testify/mock" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/cmd/config/subcommand/docker" + "github.com/stretchr/testify/assert" +) + +var ( + containers []types.Container + imageName = "cr.flyte.org/flyteorg/flyte-sandbox" +) + +func setupSandbox() { + err := os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte"), os.ModePerm) + if err != nil { + fmt.Println(err) + } + container1 := types.Container{ + ID: "FlyteSandboxClusterName", + Names: []string{ + FlyteSandboxClusterName, + }, + } + containers = append(containers, container1) +} + +func dummyReader() io.ReadCloser { + return io.NopCloser(strings.NewReader("")) +} + +func TestGetSandbox(t *testing.T) { + setupSandbox() + t.Run("Successfully get sandbox container", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + c, err := GetSandbox(ctx, mockDocker) + assert.Equal(t, c.Names[0], FlyteSandboxClusterName) + assert.Nil(t, err) + }) + + t.Run("Successfully get sandbox container with zero result", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + c, err := GetSandbox(ctx, mockDocker) + assert.Nil(t, c) + assert.Nil(t, err) + }) + + 
t.Run("Error in get sandbox container", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + err := RemoveSandbox(ctx, mockDocker, strings.NewReader("y")) + assert.Nil(t, err) + }) + +} + +func TestRemoveSandboxWithNoReply(t *testing.T) { + setupSandbox() + t.Run("Successfully remove sandbox container", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + + // Verify the attributes + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + err := RemoveSandbox(ctx, mockDocker, strings.NewReader("n")) + assert.NotNil(t, err) + + docker.DefaultConfig.Force = true + err = RemoveSandbox(ctx, mockDocker, strings.NewReader("")) + assert.Nil(t, err) + }) + + t.Run("Successfully remove sandbox container with zero sandbox containers are running", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + + // Verify the attributes + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + err := RemoveSandbox(ctx, mockDocker, strings.NewReader("n")) + assert.Nil(t, err) + }) + +} + +func TestPullDockerImage(t *testing.T) { + t.Run("Successful pull existing image with ImagePullPolicyAlways", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + // Verify the attributes + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnImageListMatch(ctx, 
types.ImageListOptions{}).Return([]types.ImageSummary{{RepoTags: []string{"nginx:latest"}}}, nil) + err := PullDockerImage(ctx, mockDocker, "nginx:latest", ImagePullPolicyAlways, ImagePullOptions{}, false) + assert.Nil(t, err) + }) + + t.Run("Successful pull non-existent image with ImagePullPolicyAlways", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + // Verify the attributes + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnImageListMatch(ctx, types.ImageListOptions{}).Return([]types.ImageSummary{}, nil) + err := PullDockerImage(ctx, mockDocker, "nginx:latest", ImagePullPolicyAlways, ImagePullOptions{}, false) + assert.Nil(t, err) + }) + + t.Run("Error in pull image", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + // Verify the attributes + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), fmt.Errorf("error")) + err := PullDockerImage(ctx, mockDocker, "nginx:latest", ImagePullPolicyAlways, ImagePullOptions{}, false) + assert.NotNil(t, err) + }) + + t.Run("Success pull non-existent image with ImagePullPolicyIfNotPresent", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + // Verify the attributes + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnImageListMatch(ctx, types.ImageListOptions{}).Return([]types.ImageSummary{}, nil) + err := PullDockerImage(ctx, mockDocker, "nginx:latest", ImagePullPolicyIfNotPresent, ImagePullOptions{}, false) + assert.Nil(t, err) + }) + + t.Run("Success skip existing image with ImagePullPolicyIfNotPresent", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + mockDocker.OnImageListMatch(ctx, types.ImageListOptions{}).Return([]types.ImageSummary{{RepoTags: []string{"nginx:latest"}}}, nil) + err := PullDockerImage(ctx, 
mockDocker, "nginx:latest", ImagePullPolicyIfNotPresent, ImagePullOptions{}, false) + assert.Nil(t, err) + }) + + t.Run("Success skip existing image with ImagePullPolicyNever", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + mockDocker.OnImageListMatch(ctx, types.ImageListOptions{}).Return([]types.ImageSummary{{RepoTags: []string{"nginx:latest"}}}, nil) + err := PullDockerImage(ctx, mockDocker, "nginx:latest", ImagePullPolicyNever, ImagePullOptions{}, false) + assert.Nil(t, err) + }) + + t.Run("Error non-existent image with ImagePullPolicyNever", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + mockDocker.OnImageListMatch(ctx, types.ImageListOptions{}).Return([]types.ImageSummary{}, nil) + err := PullDockerImage(ctx, mockDocker, "nginx:latest", ImagePullPolicyNever, ImagePullOptions{}, false) + assert.ErrorContains(t, err, "Image does not exist, but image pull policy prevents pulling it") + }) +} + +func TestStartContainer(t *testing.T) { + p1, p2, _ := GetSandboxPorts() + + t.Run("Successfully create a container", func(t *testing.T) { + setupSandbox() + mockDocker := &mocks.Docker{} + ctx := context.Background() + + // Verify the attributes + mockDocker.OnContainerCreate(ctx, &container.Config{ + Env: Environment, + Image: imageName, + Tty: false, + ExposedPorts: p1, + }, &container.HostConfig{ + Mounts: Volumes, + PortBindings: p2, + Privileged: true, + ExtraHosts: ExtraHosts, + }, nil, nil, mock.Anything).Return(container.ContainerCreateCreatedBody{ + ID: "Hello", + }, nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + id, err := StartContainer(ctx, mockDocker, Volumes, p1, p2, "nginx", imageName, nil, false) + assert.Nil(t, err) + assert.Greater(t, len(id), 0) + assert.Equal(t, id, "Hello") + }) + + t.Run("Successfully create a container with Env", func(t *testing.T) { + setupSandbox() + mockDocker := &mocks.Docker{} + ctx := context.Background() 
+ // Setup additional env + additionalEnv := []string{"a=1", "b=2"} + expectedEnv := append(Environment, "a=1") + expectedEnv = append(expectedEnv, "b=2") + + // Verify the attributes + mockDocker.OnContainerCreate(ctx, &container.Config{ + Env: expectedEnv, + Image: imageName, + Tty: false, + ExposedPorts: p1, + }, &container.HostConfig{ + Mounts: Volumes, + PortBindings: p2, + Privileged: true, + ExtraHosts: ExtraHosts, + }, nil, nil, mock.Anything).Return(container.ContainerCreateCreatedBody{ + ID: "Hello", + }, nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + id, err := StartContainer(ctx, mockDocker, Volumes, p1, p2, "nginx", imageName, additionalEnv, false) + assert.Nil(t, err) + assert.Greater(t, len(id), 0) + assert.Equal(t, id, "Hello") + assert.Equal(t, expectedEnv, Environment) + }) + + t.Run("Error in creating container", func(t *testing.T) { + setupSandbox() + mockDocker := &mocks.Docker{} + ctx := context.Background() + + // Verify the attributes + mockDocker.OnContainerCreate(ctx, &container.Config{ + Env: Environment, + Image: imageName, + Tty: false, + ExposedPorts: p1, + }, &container.HostConfig{ + Mounts: Volumes, + PortBindings: p2, + Privileged: true, + ExtraHosts: ExtraHosts, + }, nil, nil, mock.Anything).Return(container.ContainerCreateCreatedBody{ + ID: "", + }, fmt.Errorf("error")) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + id, err := StartContainer(ctx, mockDocker, Volumes, p1, p2, "nginx", imageName, nil, false) + assert.NotNil(t, err) + assert.Equal(t, len(id), 0) + assert.Equal(t, id, "") + }) + + t.Run("Error in start of a container", func(t *testing.T) { + setupSandbox() + mockDocker := &mocks.Docker{} + ctx := context.Background() + + // Verify the attributes + mockDocker.OnContainerCreate(ctx, &container.Config{ + Env: Environment, + Image: imageName, + Tty: false, + ExposedPorts: p1, + }, &container.HostConfig{ + Mounts: Volumes, + 
PortBindings: p2, + Privileged: true, + ExtraHosts: ExtraHosts, + }, nil, nil, mock.Anything).Return(container.ContainerCreateCreatedBody{ + ID: "Hello", + }, nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(fmt.Errorf("error")) + id, err := StartContainer(ctx, mockDocker, Volumes, p1, p2, "nginx", imageName, nil, false) + assert.NotNil(t, err) + assert.Equal(t, len(id), 0) + assert.Equal(t, id, "") + }) +} + +func TestReadLogs(t *testing.T) { + setupSandbox() + + t.Run("Successfully read logs", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + _, err := ReadLogs(ctx, mockDocker, "test") + assert.Nil(t, err) + }) + + t.Run("Error in reading logs", func(t *testing.T) { + mockDocker := &mocks.Docker{} + ctx := context.Background() + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, fmt.Errorf("error")) + _, err := ReadLogs(ctx, mockDocker, "test") + assert.NotNil(t, err) + }) +} + +func TestWaitForSandbox(t *testing.T) { + setupSandbox() + t.Run("Successfully read logs ", func(t *testing.T) { + reader := bufio.NewScanner(strings.NewReader("hello \n Flyte")) + + check := WaitForSandbox(reader, "Flyte") + assert.Equal(t, true, check) + }) + + t.Run("Error in reading logs ", func(t *testing.T) { + reader := bufio.NewScanner(strings.NewReader("")) + check := WaitForSandbox(reader, "Flyte") + assert.Equal(t, false, check) + }) +} + +func TestDockerClient(t *testing.T) { + t.Run("Successfully get docker mock client", func(t *testing.T) { + mockDocker := &mocks.Docker{} + Client = mockDocker + cli, err := GetDockerClient() + assert.Nil(t, err) + assert.NotNil(t, cli) + }) + t.Run("Successfully get docker 
client", func(t *testing.T) { + Client = nil + cli, err := GetDockerClient() + assert.Nil(t, err) + assert.NotNil(t, cli) + }) +} + +func TestDockerExec(t *testing.T) { + t.Run("Successfully exec command in container", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + Client = mockDocker + c := ExecConfig + c.Cmd = []string{"ls"} + mockDocker.OnContainerExecCreateMatch(ctx, mock.Anything, c).Return(types.IDResponse{}, nil) + _, err := ExecCommend(ctx, mockDocker, "test", []string{"ls"}) + assert.Nil(t, err) + }) + t.Run("Failed exec command in container", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + Client = mockDocker + c := ExecConfig + c.Cmd = []string{"ls"} + mockDocker.OnContainerExecCreateMatch(ctx, mock.Anything, c).Return(types.IDResponse{}, fmt.Errorf("test")) + _, err := ExecCommend(ctx, mockDocker, "test", []string{"ls"}) + assert.NotNil(t, err) + }) +} + +func TestInspectExecResp(t *testing.T) { + t.Run("Failed exec command in container", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + Client = mockDocker + c := ExecConfig + c.Cmd = []string{"ls"} + reader := bufio.NewReader(strings.NewReader("test")) + + mockDocker.OnContainerExecInspectMatch(ctx, mock.Anything).Return(types.ContainerExecInspect{}, nil) + mockDocker.OnContainerExecAttachMatch(ctx, mock.Anything, types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: reader, + }, fmt.Errorf("err")) + + err := InspectExecResp(ctx, mockDocker, "test") + assert.NotNil(t, err) + }) + t.Run("Successfully exec command in container", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + Client = mockDocker + c := ExecConfig + c.Cmd = []string{"ls"} + reader := bufio.NewReader(strings.NewReader("test")) + + mockDocker.OnContainerExecAttachMatch(ctx, mock.Anything, types.ExecStartCheck{}).Return(types.HijackedResponse{ + Reader: reader, + }, nil) + + err := 
InspectExecResp(ctx, mockDocker, "test") + assert.Nil(t, err) + }) + +} + +func TestGetOrCreateVolume(t *testing.T) { + t.Run("VolumeExists", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + expected := &types.Volume{Name: "test"} + + mockDocker.OnVolumeList(ctx, filters.NewArgs(filters.KeyValuePair{Key: "name", Value: "^test$"})).Return(volume.VolumeListOKBody{Volumes: []*types.Volume{expected}}, nil) + actual, err := GetOrCreateVolume(ctx, mockDocker, "test", false) + assert.Equal(t, expected, actual, "volumes should match") + assert.Nil(t, err) + }) + t.Run("VolumeDoesNotExist", func(t *testing.T) { + ctx := context.Background() + mockDocker := &mocks.Docker{} + expected := types.Volume{Name: "test"} + + mockDocker.OnVolumeList(ctx, filters.NewArgs(filters.KeyValuePair{Key: "name", Value: "^test$"})).Return(volume.VolumeListOKBody{Volumes: []*types.Volume{}}, nil) + mockDocker.OnVolumeCreate(ctx, volume.VolumeCreateBody{Name: "test"}).Return(expected, nil) + actual, err := GetOrCreateVolume(ctx, mockDocker, "test", false) + assert.Equal(t, expected, *actual, "volumes should match") + assert.Nil(t, err) + }) + +} + +func TestDemoPorts(t *testing.T) { + _, ports, _ := GetDemoPorts() + assert.Equal(t, 6, len(ports)) +} + +func TestCopyFile(t *testing.T) { + ctx := context.Background() + // Create a fake tar file in tmp. 
+ fo, err := os.CreateTemp("", "sampledata") + assert.NoError(t, err) + tarWriter := tar.NewWriter(fo) + err = tarWriter.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: "flyte.yaml", + Size: 4, + Mode: 0640, + ModTime: time.Unix(1245206587, 0), + }) + assert.NoError(t, err) + cnt, err := tarWriter.Write([]byte("a: b")) + assert.NoError(t, err) + assert.Equal(t, 4, cnt) + tarWriter.Close() + fo.Close() + + image := "some:image" + containerName := "my-container" + + t.Run("No errors", func(t *testing.T) { + // Create reader of the tar file + reader, err := os.Open(fo.Name()) + assert.NoError(t, err) + // Create destination file name + destDir, err := os.MkdirTemp("", "dest") + assert.NoError(t, err) + destination := filepath.Join(destDir, "destfile") + + // Mocks + mockDocker := &mocks.Docker{} + mockDocker.OnContainerCreate( + ctx, &container.Config{Image: image}, &container.HostConfig{}, nil, nil, containerName).Return( + container.ContainerCreateCreatedBody{ID: containerName}, nil) + mockDocker.OnContainerStatPath(ctx, containerName, "some source").Return(types.ContainerPathStat{}, nil) + mockDocker.OnCopyFromContainer(ctx, containerName, "some source").Return(reader, types.ContainerPathStat{}, nil) + mockDocker.OnContainerRemove(ctx, containerName, types.ContainerRemoveOptions{Force: true}).Return(nil) + assert.Nil(t, err) + + // Run + err = CopyContainerFile(ctx, mockDocker, "some source", destination, containerName, image) + assert.NoError(t, err) + + // Read the file and make sure it's correct + strBytes, err := os.ReadFile(destination) + assert.NoError(t, err) + assert.Equal(t, "a: b", string(strBytes)) + }) + + t.Run("Erroring on stat", func(t *testing.T) { + myErr := fmt.Errorf("erroring on stat") + + // Mocks + mockDocker := &mocks.Docker{} + mockDocker.OnContainerCreate( + ctx, &container.Config{Image: image}, &container.HostConfig{}, nil, nil, containerName).Return( + container.ContainerCreateCreatedBody{ID: containerName}, nil) + 
mockDocker.OnContainerStatPath(ctx, containerName, "some source").Return(types.ContainerPathStat{}, myErr) + mockDocker.OnContainerRemove(ctx, containerName, types.ContainerRemoveOptions{Force: true}).Return(nil) + assert.Nil(t, err) + + // Run + err = CopyContainerFile(ctx, mockDocker, "some source", "", containerName, image) + assert.Equal(t, myErr, err) + }) +} diff --git a/flytectl/pkg/docker/imagepullpolicy_enumer.go b/flytectl/pkg/docker/imagepullpolicy_enumer.go new file mode 100644 index 0000000000..9a44dc09ab --- /dev/null +++ b/flytectl/pkg/docker/imagepullpolicy_enumer.go @@ -0,0 +1,68 @@ +// Code generated by "enumer -type=ImagePullPolicy -trimprefix=ImagePullPolicy --json"; DO NOT EDIT. + +package docker + +import ( + "encoding/json" + "fmt" +) + +const _ImagePullPolicyName = "AlwaysIfNotPresentNever" + +var _ImagePullPolicyIndex = [...]uint8{0, 6, 18, 23} + +func (i ImagePullPolicy) String() string { + if i < 0 || i >= ImagePullPolicy(len(_ImagePullPolicyIndex)-1) { + return fmt.Sprintf("ImagePullPolicy(%d)", i) + } + return _ImagePullPolicyName[_ImagePullPolicyIndex[i]:_ImagePullPolicyIndex[i+1]] +} + +var _ImagePullPolicyValues = []ImagePullPolicy{0, 1, 2} + +var _ImagePullPolicyNameToValueMap = map[string]ImagePullPolicy{ + _ImagePullPolicyName[0:6]: 0, + _ImagePullPolicyName[6:18]: 1, + _ImagePullPolicyName[18:23]: 2, +} + +// ImagePullPolicyString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ImagePullPolicyString(s string) (ImagePullPolicy, error) { + if val, ok := _ImagePullPolicyNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ImagePullPolicy values", s) +} + +// ImagePullPolicyValues returns all values of the enum +func ImagePullPolicyValues() []ImagePullPolicy { + return _ImagePullPolicyValues +} + +// IsAImagePullPolicy returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i ImagePullPolicy) IsAImagePullPolicy() bool { + for _, v := range _ImagePullPolicyValues { + if i == v { + return true + } + } + return false +} + +// MarshalJSON implements the json.Marshaler interface for ImagePullPolicy +func (i ImagePullPolicy) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for ImagePullPolicy +func (i *ImagePullPolicy) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("ImagePullPolicy should be a string, got %s", data) + } + + var err error + *i, err = ImagePullPolicyString(s) + return err +} diff --git a/flytectl/pkg/docker/mocks/docker.go b/flytectl/pkg/docker/mocks/docker.go new file mode 100644 index 0000000000..b5361fc957 --- /dev/null +++ b/flytectl/pkg/docker/mocks/docker.go @@ -0,0 +1,652 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + container "github.com/docker/docker/api/types/container" + + filters "github.com/docker/docker/api/types/filters" + + io "io" + + mock "github.com/stretchr/testify/mock" + + network "github.com/docker/docker/api/types/network" + + types "github.com/docker/docker/api/types" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" + + volume "github.com/docker/docker/api/types/volume" +) + +// Docker is an autogenerated mock type for the Docker type +type Docker struct { + mock.Mock +} + +type Docker_ContainerCreate struct { + *mock.Call +} + +func (_m Docker_ContainerCreate) Return(_a0 container.ContainerCreateCreatedBody, _a1 error) *Docker_ContainerCreate { + return &Docker_ContainerCreate{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *v1.Platform, containerName string) *Docker_ContainerCreate { + c_call := 
_m.On("ContainerCreate", ctx, config, hostConfig, networkingConfig, platform, containerName) + return &Docker_ContainerCreate{Call: c_call} +} + +func (_m *Docker) OnContainerCreateMatch(matchers ...interface{}) *Docker_ContainerCreate { + c_call := _m.On("ContainerCreate", matchers...) + return &Docker_ContainerCreate{Call: c_call} +} + +// ContainerCreate provides a mock function with given fields: ctx, config, hostConfig, networkingConfig, platform, containerName +func (_m *Docker) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *v1.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { + ret := _m.Called(ctx, config, hostConfig, networkingConfig, platform, containerName) + + var r0 container.ContainerCreateCreatedBody + if rf, ok := ret.Get(0).(func(context.Context, *container.Config, *container.HostConfig, *network.NetworkingConfig, *v1.Platform, string) container.ContainerCreateCreatedBody); ok { + r0 = rf(ctx, config, hostConfig, networkingConfig, platform, containerName) + } else { + r0 = ret.Get(0).(container.ContainerCreateCreatedBody) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *container.Config, *container.HostConfig, *network.NetworkingConfig, *v1.Platform, string) error); ok { + r1 = rf(ctx, config, hostConfig, networkingConfig, platform, containerName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerExecAttach struct { + *mock.Call +} + +func (_m Docker_ContainerExecAttach) Return(_a0 types.HijackedResponse, _a1 error) *Docker_ContainerExecAttach { + return &Docker_ContainerExecAttach{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) *Docker_ContainerExecAttach { + c_call := _m.On("ContainerExecAttach", ctx, execID, config) + return &Docker_ContainerExecAttach{Call: c_call} +} 
+ +func (_m *Docker) OnContainerExecAttachMatch(matchers ...interface{}) *Docker_ContainerExecAttach { + c_call := _m.On("ContainerExecAttach", matchers...) + return &Docker_ContainerExecAttach{Call: c_call} +} + +// ContainerExecAttach provides a mock function with given fields: ctx, execID, config +func (_m *Docker) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + ret := _m.Called(ctx, execID, config) + + var r0 types.HijackedResponse + if rf, ok := ret.Get(0).(func(context.Context, string, types.ExecStartCheck) types.HijackedResponse); ok { + r0 = rf(ctx, execID, config) + } else { + r0 = ret.Get(0).(types.HijackedResponse) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, types.ExecStartCheck) error); ok { + r1 = rf(ctx, execID, config) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerExecCreate struct { + *mock.Call +} + +func (_m Docker_ContainerExecCreate) Return(_a0 types.IDResponse, _a1 error) *Docker_ContainerExecCreate { + return &Docker_ContainerExecCreate{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerExecCreate(ctx context.Context, _a1 string, config types.ExecConfig) *Docker_ContainerExecCreate { + c_call := _m.On("ContainerExecCreate", ctx, _a1, config) + return &Docker_ContainerExecCreate{Call: c_call} +} + +func (_m *Docker) OnContainerExecCreateMatch(matchers ...interface{}) *Docker_ContainerExecCreate { + c_call := _m.On("ContainerExecCreate", matchers...) 
+ return &Docker_ContainerExecCreate{Call: c_call} +} + +// ContainerExecCreate provides a mock function with given fields: ctx, _a1, config +func (_m *Docker) ContainerExecCreate(ctx context.Context, _a1 string, config types.ExecConfig) (types.IDResponse, error) { + ret := _m.Called(ctx, _a1, config) + + var r0 types.IDResponse + if rf, ok := ret.Get(0).(func(context.Context, string, types.ExecConfig) types.IDResponse); ok { + r0 = rf(ctx, _a1, config) + } else { + r0 = ret.Get(0).(types.IDResponse) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, types.ExecConfig) error); ok { + r1 = rf(ctx, _a1, config) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerExecInspect struct { + *mock.Call +} + +func (_m Docker_ContainerExecInspect) Return(_a0 types.ContainerExecInspect, _a1 error) *Docker_ContainerExecInspect { + return &Docker_ContainerExecInspect{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerExecInspect(ctx context.Context, execID string) *Docker_ContainerExecInspect { + c_call := _m.On("ContainerExecInspect", ctx, execID) + return &Docker_ContainerExecInspect{Call: c_call} +} + +func (_m *Docker) OnContainerExecInspectMatch(matchers ...interface{}) *Docker_ContainerExecInspect { + c_call := _m.On("ContainerExecInspect", matchers...) 
+ return &Docker_ContainerExecInspect{Call: c_call} +} + +// ContainerExecInspect provides a mock function with given fields: ctx, execID +func (_m *Docker) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + ret := _m.Called(ctx, execID) + + var r0 types.ContainerExecInspect + if rf, ok := ret.Get(0).(func(context.Context, string) types.ContainerExecInspect); ok { + r0 = rf(ctx, execID) + } else { + r0 = ret.Get(0).(types.ContainerExecInspect) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, execID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerList struct { + *mock.Call +} + +func (_m Docker_ContainerList) Return(_a0 []types.Container, _a1 error) *Docker_ContainerList { + return &Docker_ContainerList{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerList(ctx context.Context, options types.ContainerListOptions) *Docker_ContainerList { + c_call := _m.On("ContainerList", ctx, options) + return &Docker_ContainerList{Call: c_call} +} + +func (_m *Docker) OnContainerListMatch(matchers ...interface{}) *Docker_ContainerList { + c_call := _m.On("ContainerList", matchers...) 
+ return &Docker_ContainerList{Call: c_call} +} + +// ContainerList provides a mock function with given fields: ctx, options +func (_m *Docker) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + ret := _m.Called(ctx, options) + + var r0 []types.Container + if rf, ok := ret.Get(0).(func(context.Context, types.ContainerListOptions) []types.Container); ok { + r0 = rf(ctx, options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Container) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.ContainerListOptions) error); ok { + r1 = rf(ctx, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerLogs struct { + *mock.Call +} + +func (_m Docker_ContainerLogs) Return(_a0 io.ReadCloser, _a1 error) *Docker_ContainerLogs { + return &Docker_ContainerLogs{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerLogs(ctx context.Context, _a1 string, options types.ContainerLogsOptions) *Docker_ContainerLogs { + c_call := _m.On("ContainerLogs", ctx, _a1, options) + return &Docker_ContainerLogs{Call: c_call} +} + +func (_m *Docker) OnContainerLogsMatch(matchers ...interface{}) *Docker_ContainerLogs { + c_call := _m.On("ContainerLogs", matchers...) 
+ return &Docker_ContainerLogs{Call: c_call} +} + +// ContainerLogs provides a mock function with given fields: ctx, _a1, options +func (_m *Docker) ContainerLogs(ctx context.Context, _a1 string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + ret := _m.Called(ctx, _a1, options) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, string, types.ContainerLogsOptions) io.ReadCloser); ok { + r0 = rf(ctx, _a1, options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, types.ContainerLogsOptions) error); ok { + r1 = rf(ctx, _a1, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerRemove struct { + *mock.Call +} + +func (_m Docker_ContainerRemove) Return(_a0 error) *Docker_ContainerRemove { + return &Docker_ContainerRemove{Call: _m.Call.Return(_a0)} +} + +func (_m *Docker) OnContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) *Docker_ContainerRemove { + c_call := _m.On("ContainerRemove", ctx, containerID, options) + return &Docker_ContainerRemove{Call: c_call} +} + +func (_m *Docker) OnContainerRemoveMatch(matchers ...interface{}) *Docker_ContainerRemove { + c_call := _m.On("ContainerRemove", matchers...) 
+ return &Docker_ContainerRemove{Call: c_call} +} + +// ContainerRemove provides a mock function with given fields: ctx, containerID, options +func (_m *Docker) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + ret := _m.Called(ctx, containerID, options) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, types.ContainerRemoveOptions) error); ok { + r0 = rf(ctx, containerID, options) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Docker_ContainerStart struct { + *mock.Call +} + +func (_m Docker_ContainerStart) Return(_a0 error) *Docker_ContainerStart { + return &Docker_ContainerStart{Call: _m.Call.Return(_a0)} +} + +func (_m *Docker) OnContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) *Docker_ContainerStart { + c_call := _m.On("ContainerStart", ctx, containerID, options) + return &Docker_ContainerStart{Call: c_call} +} + +func (_m *Docker) OnContainerStartMatch(matchers ...interface{}) *Docker_ContainerStart { + c_call := _m.On("ContainerStart", matchers...) 
+ return &Docker_ContainerStart{Call: c_call} +} + +// ContainerStart provides a mock function with given fields: ctx, containerID, options +func (_m *Docker) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + ret := _m.Called(ctx, containerID, options) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, types.ContainerStartOptions) error); ok { + r0 = rf(ctx, containerID, options) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Docker_ContainerStatPath struct { + *mock.Call +} + +func (_m Docker_ContainerStatPath) Return(_a0 types.ContainerPathStat, _a1 error) *Docker_ContainerStatPath { + return &Docker_ContainerStatPath{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerStatPath(ctx context.Context, containerID string, path string) *Docker_ContainerStatPath { + c_call := _m.On("ContainerStatPath", ctx, containerID, path) + return &Docker_ContainerStatPath{Call: c_call} +} + +func (_m *Docker) OnContainerStatPathMatch(matchers ...interface{}) *Docker_ContainerStatPath { + c_call := _m.On("ContainerStatPath", matchers...) 
+ return &Docker_ContainerStatPath{Call: c_call} +} + +// ContainerStatPath provides a mock function with given fields: ctx, containerID, path +func (_m *Docker) ContainerStatPath(ctx context.Context, containerID string, path string) (types.ContainerPathStat, error) { + ret := _m.Called(ctx, containerID, path) + + var r0 types.ContainerPathStat + if rf, ok := ret.Get(0).(func(context.Context, string, string) types.ContainerPathStat); ok { + r0 = rf(ctx, containerID, path) + } else { + r0 = ret.Get(0).(types.ContainerPathStat) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, containerID, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ContainerWait struct { + *mock.Call +} + +func (_m Docker_ContainerWait) Return(_a0 <-chan container.ContainerWaitOKBody, _a1 <-chan error) *Docker_ContainerWait { + return &Docker_ContainerWait{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) *Docker_ContainerWait { + c_call := _m.On("ContainerWait", ctx, containerID, condition) + return &Docker_ContainerWait{Call: c_call} +} + +func (_m *Docker) OnContainerWaitMatch(matchers ...interface{}) *Docker_ContainerWait { + c_call := _m.On("ContainerWait", matchers...) 
+ return &Docker_ContainerWait{Call: c_call} +} + +// ContainerWait provides a mock function with given fields: ctx, containerID, condition +func (_m *Docker) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + ret := _m.Called(ctx, containerID, condition) + + var r0 <-chan container.ContainerWaitOKBody + if rf, ok := ret.Get(0).(func(context.Context, string, container.WaitCondition) <-chan container.ContainerWaitOKBody); ok { + r0 = rf(ctx, containerID, condition) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan container.ContainerWaitOKBody) + } + } + + var r1 <-chan error + if rf, ok := ret.Get(1).(func(context.Context, string, container.WaitCondition) <-chan error); ok { + r1 = rf(ctx, containerID, condition) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + return r0, r1 +} + +type Docker_CopyFromContainer struct { + *mock.Call +} + +func (_m Docker_CopyFromContainer) Return(_a0 io.ReadCloser, _a1 types.ContainerPathStat, _a2 error) *Docker_CopyFromContainer { + return &Docker_CopyFromContainer{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *Docker) OnCopyFromContainer(ctx context.Context, containerID string, srcPath string) *Docker_CopyFromContainer { + c_call := _m.On("CopyFromContainer", ctx, containerID, srcPath) + return &Docker_CopyFromContainer{Call: c_call} +} + +func (_m *Docker) OnCopyFromContainerMatch(matchers ...interface{}) *Docker_CopyFromContainer { + c_call := _m.On("CopyFromContainer", matchers...) 
+ return &Docker_CopyFromContainer{Call: c_call} +} + +// CopyFromContainer provides a mock function with given fields: ctx, containerID, srcPath +func (_m *Docker) CopyFromContainer(ctx context.Context, containerID string, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + ret := _m.Called(ctx, containerID, srcPath) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, string, string) io.ReadCloser); ok { + r0 = rf(ctx, containerID, srcPath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 types.ContainerPathStat + if rf, ok := ret.Get(1).(func(context.Context, string, string) types.ContainerPathStat); ok { + r1 = rf(ctx, containerID, srcPath) + } else { + r1 = ret.Get(1).(types.ContainerPathStat) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok { + r2 = rf(ctx, containerID, srcPath) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type Docker_ImageList struct { + *mock.Call +} + +func (_m Docker_ImageList) Return(_a0 []types.ImageSummary, _a1 error) *Docker_ImageList { + return &Docker_ImageList{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnImageList(ctx context.Context, listOption types.ImageListOptions) *Docker_ImageList { + c_call := _m.On("ImageList", ctx, listOption) + return &Docker_ImageList{Call: c_call} +} + +func (_m *Docker) OnImageListMatch(matchers ...interface{}) *Docker_ImageList { + c_call := _m.On("ImageList", matchers...) 
+ return &Docker_ImageList{Call: c_call} +} + +// ImageList provides a mock function with given fields: ctx, listOption +func (_m *Docker) ImageList(ctx context.Context, listOption types.ImageListOptions) ([]types.ImageSummary, error) { + ret := _m.Called(ctx, listOption) + + var r0 []types.ImageSummary + if rf, ok := ret.Get(0).(func(context.Context, types.ImageListOptions) []types.ImageSummary); ok { + r0 = rf(ctx, listOption) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.ImageSummary) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.ImageListOptions) error); ok { + r1 = rf(ctx, listOption) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_ImagePull struct { + *mock.Call +} + +func (_m Docker_ImagePull) Return(_a0 io.ReadCloser, _a1 error) *Docker_ImagePull { + return &Docker_ImagePull{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) *Docker_ImagePull { + c_call := _m.On("ImagePull", ctx, refStr, options) + return &Docker_ImagePull{Call: c_call} +} + +func (_m *Docker) OnImagePullMatch(matchers ...interface{}) *Docker_ImagePull { + c_call := _m.On("ImagePull", matchers...) 
+ return &Docker_ImagePull{Call: c_call} +} + +// ImagePull provides a mock function with given fields: ctx, refStr, options +func (_m *Docker) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ret := _m.Called(ctx, refStr, options) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(context.Context, string, types.ImagePullOptions) io.ReadCloser); ok { + r0 = rf(ctx, refStr, options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, types.ImagePullOptions) error); ok { + r1 = rf(ctx, refStr, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_VolumeCreate struct { + *mock.Call +} + +func (_m Docker_VolumeCreate) Return(_a0 types.Volume, _a1 error) *Docker_VolumeCreate { + return &Docker_VolumeCreate{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnVolumeCreate(ctx context.Context, options volume.VolumeCreateBody) *Docker_VolumeCreate { + c_call := _m.On("VolumeCreate", ctx, options) + return &Docker_VolumeCreate{Call: c_call} +} + +func (_m *Docker) OnVolumeCreateMatch(matchers ...interface{}) *Docker_VolumeCreate { + c_call := _m.On("VolumeCreate", matchers...) 
+ return &Docker_VolumeCreate{Call: c_call} +} + +// VolumeCreate provides a mock function with given fields: ctx, options +func (_m *Docker) VolumeCreate(ctx context.Context, options volume.VolumeCreateBody) (types.Volume, error) { + ret := _m.Called(ctx, options) + + var r0 types.Volume + if rf, ok := ret.Get(0).(func(context.Context, volume.VolumeCreateBody) types.Volume); ok { + r0 = rf(ctx, options) + } else { + r0 = ret.Get(0).(types.Volume) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, volume.VolumeCreateBody) error); ok { + r1 = rf(ctx, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_VolumeList struct { + *mock.Call +} + +func (_m Docker_VolumeList) Return(_a0 volume.VolumeListOKBody, _a1 error) *Docker_VolumeList { + return &Docker_VolumeList{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Docker) OnVolumeList(ctx context.Context, filter filters.Args) *Docker_VolumeList { + c_call := _m.On("VolumeList", ctx, filter) + return &Docker_VolumeList{Call: c_call} +} + +func (_m *Docker) OnVolumeListMatch(matchers ...interface{}) *Docker_VolumeList { + c_call := _m.On("VolumeList", matchers...) 
+ return &Docker_VolumeList{Call: c_call} +} + +// VolumeList provides a mock function with given fields: ctx, filter +func (_m *Docker) VolumeList(ctx context.Context, filter filters.Args) (volume.VolumeListOKBody, error) { + ret := _m.Called(ctx, filter) + + var r0 volume.VolumeListOKBody + if rf, ok := ret.Get(0).(func(context.Context, filters.Args) volume.VolumeListOKBody); ok { + r0 = rf(ctx, filter) + } else { + r0 = ret.Get(0).(volume.VolumeListOKBody) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, filters.Args) error); ok { + r1 = rf(ctx, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Docker_VolumeRemove struct { + *mock.Call +} + +func (_m Docker_VolumeRemove) Return(_a0 error) *Docker_VolumeRemove { + return &Docker_VolumeRemove{Call: _m.Call.Return(_a0)} +} + +func (_m *Docker) OnVolumeRemove(ctx context.Context, volumeID string, force bool) *Docker_VolumeRemove { + c_call := _m.On("VolumeRemove", ctx, volumeID, force) + return &Docker_VolumeRemove{Call: c_call} +} + +func (_m *Docker) OnVolumeRemoveMatch(matchers ...interface{}) *Docker_VolumeRemove { + c_call := _m.On("VolumeRemove", matchers...) 
+ return &Docker_VolumeRemove{Call: c_call} +} + +// VolumeRemove provides a mock function with given fields: ctx, volumeID, force +func (_m *Docker) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + ret := _m.Called(ctx, volumeID, force) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok { + r0 = rf(ctx, volumeID, force) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flytectl/pkg/ext/attribute_match_deleter_test.go b/flytectl/pkg/ext/attribute_match_deleter_test.go new file mode 100644 index 0000000000..885eff0585 --- /dev/null +++ b/flytectl/pkg/ext/attribute_match_deleter_test.go @@ -0,0 +1,63 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var adminDeleterExt AdminDeleterExtClient + +func deleteAttributeMatchFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminDeleterExt = AdminDeleterExtClient{AdminClient: adminClient} +} + +func TestDeleteWorkflowAttributes(t *testing.T) { + deleteAttributeMatchFetcherSetup() + adminClient.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := adminDeleterExt.DeleteWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", admin.MatchableResource_TASK_RESOURCE) + assert.Nil(t, err) +} + +func TestDeleteWorkflowAttributesError(t *testing.T) { + deleteAttributeMatchFetcherSetup() + adminClient.OnDeleteWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + err := adminDeleterExt.DeleteWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", admin.MatchableResource_TASK_RESOURCE) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestDeleteProjectDomainAttributes(t *testing.T) { + 
deleteAttributeMatchFetcherSetup() + adminClient.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := adminDeleterExt.DeleteProjectDomainAttributes(ctx, "dummyProject", "domainValue", admin.MatchableResource_TASK_RESOURCE) + assert.Nil(t, err) +} + +func TestDeleteProjectDomainAttributesError(t *testing.T) { + deleteAttributeMatchFetcherSetup() + adminClient.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + err := adminDeleterExt.DeleteProjectDomainAttributes(ctx, "dummyProject", "domainValue", admin.MatchableResource_TASK_RESOURCE) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestDeleteProjectAttributes(t *testing.T) { + deleteAttributeMatchFetcherSetup() + adminClient.OnDeleteProjectAttributesMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := adminDeleterExt.DeleteProjectAttributes(ctx, "dummyProject", admin.MatchableResource_TASK_RESOURCE) + assert.Nil(t, err) +} + +func TestDeleteProjectAttributesError(t *testing.T) { + deleteAttributeMatchFetcherSetup() + adminClient.OnDeleteProjectAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + err := adminDeleterExt.DeleteProjectAttributes(ctx, "dummyProject", admin.MatchableResource_TASK_RESOURCE) + assert.Equal(t, fmt.Errorf("failed"), err) +} diff --git a/flytectl/pkg/ext/attribute_match_fetcher.go b/flytectl/pkg/ext/attribute_match_fetcher.go new file mode 100644 index 0000000000..eca2ce8e15 --- /dev/null +++ b/flytectl/pkg/ext/attribute_match_fetcher.go @@ -0,0 +1,66 @@ +package ext + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +func (a *AdminFetcherExtClient) FetchWorkflowAttributes(ctx context.Context, project, domain, name string, + rsType admin.MatchableResource) (*admin.WorkflowAttributesGetResponse, error) { + response, err := 
a.AdminServiceClient().GetWorkflowAttributes(ctx, &admin.WorkflowAttributesGetRequest{ + Project: project, + Domain: domain, + Workflow: name, + ResourceType: rsType, + }) + if err != nil && status.Code(err) != codes.NotFound { + return nil, err + } + if status.Code(err) == codes.NotFound || + response.GetAttributes() == nil || + response.GetAttributes().GetMatchingAttributes() == nil { + return nil, NewNotFoundError("attribute") + } + return response, nil +} + +func (a *AdminFetcherExtClient) FetchProjectDomainAttributes(ctx context.Context, project, domain string, + rsType admin.MatchableResource) (*admin.ProjectDomainAttributesGetResponse, error) { + response, err := a.AdminServiceClient().GetProjectDomainAttributes(ctx, + &admin.ProjectDomainAttributesGetRequest{ + Project: project, + Domain: domain, + ResourceType: rsType, + }) + if err != nil && status.Code(err) != codes.NotFound { + return nil, err + } + if status.Code(err) == codes.NotFound || + response.GetAttributes() == nil || + response.GetAttributes().GetMatchingAttributes() == nil { + return nil, NewNotFoundError("attribute") + } + return response, nil +} + +func (a *AdminFetcherExtClient) FetchProjectAttributes(ctx context.Context, project string, + rsType admin.MatchableResource) (*admin.ProjectAttributesGetResponse, error) { + response, err := a.AdminServiceClient().GetProjectAttributes(ctx, + &admin.ProjectAttributesGetRequest{ + Project: project, + ResourceType: rsType, + }) + if err != nil && status.Code(err) != codes.NotFound { + return nil, err + } + if status.Code(err) == codes.NotFound || + response.GetAttributes() == nil || + response.GetAttributes().GetMatchingAttributes() == nil { + return nil, NewNotFoundError("attribute") + } + return response, nil +} diff --git a/flytectl/pkg/ext/attribute_match_fetcher_test.go b/flytectl/pkg/ext/attribute_match_fetcher_test.go new file mode 100644 index 0000000000..b9ecff16d6 --- /dev/null +++ b/flytectl/pkg/ext/attribute_match_fetcher_test.go @@ -0,0 
+1,101 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var wResp *admin.WorkflowAttributesGetResponse + +var pResp *admin.ProjectDomainAttributesGetResponse + +func getAttributeMatchFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminFetcherExt = AdminFetcherExtClient{AdminClient: adminClient} + wResp = &admin.WorkflowAttributesGetResponse{Attributes: &admin.WorkflowAttributes{ + MatchingAttributes: &admin.MatchingAttributes{ + Target: nil, + }}} + pResp = &admin.ProjectDomainAttributesGetResponse{Attributes: &admin.ProjectDomainAttributes{ + Project: "dummyProject", + Domain: "dummyDomain", + MatchingAttributes: &admin.MatchingAttributes{ + Target: nil, + }}} +} + +func TestFetchWorkflowAttributes(t *testing.T) { + getAttributeMatchFetcherSetup() + adminClient.OnGetWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(wResp, nil) + _, err := adminFetcherExt.FetchWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", admin.MatchableResource_TASK_RESOURCE) + assert.Nil(t, err) +} + +func TestFetchWorkflowAttributesError(t *testing.T) { + t.Run("failed api", func(t *testing.T) { + getAttributeMatchFetcherSetup() + adminClient.OnGetWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", admin.MatchableResource_TASK_RESOURCE) + assert.Equal(t, fmt.Errorf("failed"), err) + }) + t.Run("empty data from api", func(t *testing.T) { + getAttributeMatchFetcherSetup() + wResp := &admin.WorkflowAttributesGetResponse{} + adminClient.OnGetWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(wResp, nil) + _, err := 
adminFetcherExt.FetchWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", admin.MatchableResource_TASK_RESOURCE) + assert.NotNil(t, err) + assert.True(t, IsNotFoundError(err)) + assert.EqualError(t, err, "attribute not found") + }) +} + +func TestFetchProjectDomainAttributes(t *testing.T) { + getAttributeMatchFetcherSetup() + adminClient.OnGetProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(pResp, nil) + _, err := adminFetcherExt.FetchProjectDomainAttributes(ctx, "dummyProject", "domainValue", admin.MatchableResource_TASK_RESOURCE) + assert.Nil(t, err) +} + +func TestFetchProjectDomainAttributesError(t *testing.T) { + t.Run("failed api", func(t *testing.T) { + getAttributeMatchFetcherSetup() + adminClient.OnGetProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchProjectDomainAttributes(ctx, "dummyProject", "domainValue", admin.MatchableResource_TASK_RESOURCE) + assert.Equal(t, fmt.Errorf("failed"), err) + }) + t.Run("empty data from api", func(t *testing.T) { + getAttributeMatchFetcherSetup() + pResp := &admin.ProjectDomainAttributesGetResponse{} + adminClient.OnGetProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(pResp, nil) + _, err := adminFetcherExt.FetchProjectDomainAttributes(ctx, "dummyProject", "domainValue", admin.MatchableResource_TASK_RESOURCE) + assert.NotNil(t, err) + assert.True(t, IsNotFoundError(err)) + assert.EqualError(t, err, "attribute not found") + }) +} + +func TestFetchProjectAttributesError(t *testing.T) { + t.Run("failed api", func(t *testing.T) { + getAttributeMatchFetcherSetup() + adminClient.OnGetProjectAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchProjectAttributes(ctx, "dummyProject", admin.MatchableResource_TASK_RESOURCE) + assert.Equal(t, fmt.Errorf("failed"), err) + }) + t.Run("empty data from api", func(t *testing.T) { + 
getAttributeMatchFetcherSetup() + pResp := &admin.ProjectAttributesGetResponse{} + adminClient.OnGetProjectAttributesMatch(mock.Anything, mock.Anything).Return(pResp, nil) + _, err := adminFetcherExt.FetchProjectAttributes(ctx, "dummyProject", admin.MatchableResource_TASK_RESOURCE) + assert.NotNil(t, err) + assert.True(t, IsNotFoundError(err)) + assert.EqualError(t, err, "attribute not found") + }) +} diff --git a/flytectl/pkg/ext/attribute_match_updater.go b/flytectl/pkg/ext/attribute_match_updater.go new file mode 100644 index 0000000000..9153b8b35f --- /dev/null +++ b/flytectl/pkg/ext/attribute_match_updater.go @@ -0,0 +1,42 @@ +package ext + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +func (a *AdminUpdaterExtClient) UpdateWorkflowAttributes(ctx context.Context, project, domain, name string, matchingAttr *admin.MatchingAttributes) error { + _, err := a.AdminServiceClient().UpdateWorkflowAttributes(ctx, &admin.WorkflowAttributesUpdateRequest{ + Attributes: &admin.WorkflowAttributes{ + Project: project, + Domain: domain, + Workflow: name, + MatchingAttributes: matchingAttr, + }, + }) + return err +} + +func (a *AdminUpdaterExtClient) UpdateProjectDomainAttributes(ctx context.Context, project, domain string, matchingAttr *admin.MatchingAttributes) error { + _, err := a.AdminServiceClient().UpdateProjectDomainAttributes(ctx, + &admin.ProjectDomainAttributesUpdateRequest{ + Attributes: &admin.ProjectDomainAttributes{ + Project: project, + Domain: domain, + MatchingAttributes: matchingAttr, + }, + }) + return err +} + +func (a *AdminUpdaterExtClient) UpdateProjectAttributes(ctx context.Context, project string, matchingAttr *admin.MatchingAttributes) error { + _, err := a.AdminServiceClient().UpdateProjectAttributes(ctx, + &admin.ProjectAttributesUpdateRequest{ + Attributes: &admin.ProjectAttributes{ + Project: project, + MatchingAttributes: matchingAttr, + }, + }) + return err +} diff --git 
a/flytectl/pkg/ext/attribute_match_updater_test.go b/flytectl/pkg/ext/attribute_match_updater_test.go new file mode 100644 index 0000000000..1fab0f6795 --- /dev/null +++ b/flytectl/pkg/ext/attribute_match_updater_test.go @@ -0,0 +1,72 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var adminUpdaterExt AdminUpdaterExtClient + +func updateAttributeMatchFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminUpdaterExt = AdminUpdaterExtClient{AdminClient: adminClient} +} + +func TestUpdateWorkflowAttributes(t *testing.T) { + updateAttributeMatchFetcherSetup() + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{}, + } + adminClient.OnUpdateWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := adminUpdaterExt.UpdateWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", matchingAttr) + assert.Nil(t, err) +} + +func TestUpdateWorkflowAttributesError(t *testing.T) { + updateAttributeMatchFetcherSetup() + adminClient.OnUpdateWorkflowAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + err := adminUpdaterExt.UpdateWorkflowAttributes(ctx, "dummyProject", "domainValue", "workflowName", nil) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestUpdateProjectDomainAttributes(t *testing.T) { + updateAttributeMatchFetcherSetup() + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{}, + } + adminClient.OnUpdateProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := adminUpdaterExt.UpdateProjectDomainAttributes(ctx, "dummyProject", "domainValue", matchingAttr) + assert.Nil(t, err) +} + +func 
TestUpdateProjectDomainAttributesError(t *testing.T) { + updateAttributeMatchFetcherSetup() + adminClient.OnUpdateProjectDomainAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + err := adminUpdaterExt.UpdateProjectDomainAttributes(ctx, "dummyProject", "domainValue", nil) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestUpdateProjectAttributes(t *testing.T) { + updateAttributeMatchFetcherSetup() + matchingAttr := &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_TaskResourceAttributes{}, + } + adminClient.OnUpdateProjectAttributesMatch(mock.Anything, mock.Anything).Return(nil, nil) + err := adminUpdaterExt.UpdateProjectAttributes(ctx, "dummyProject", matchingAttr) + assert.Nil(t, err) +} + +func TestUpdateProjectAttributesError(t *testing.T) { + updateAttributeMatchFetcherSetup() + adminClient.OnUpdateProjectAttributesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + err := adminUpdaterExt.UpdateProjectAttributes(ctx, "dummyProject", nil) + assert.Equal(t, fmt.Errorf("failed"), err) +} diff --git a/flytectl/pkg/ext/attribute_matcher_deleter.go b/flytectl/pkg/ext/attribute_matcher_deleter.go new file mode 100644 index 0000000000..e53490eaad --- /dev/null +++ b/flytectl/pkg/ext/attribute_matcher_deleter.go @@ -0,0 +1,34 @@ +package ext + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +func (a *AdminDeleterExtClient) DeleteWorkflowAttributes(ctx context.Context, project, domain, name string, rsType admin.MatchableResource) error { + _, err := a.AdminServiceClient().DeleteWorkflowAttributes(ctx, &admin.WorkflowAttributesDeleteRequest{ + Project: project, + Domain: domain, + Workflow: name, + ResourceType: rsType, + }) + return err +} + +func (a *AdminDeleterExtClient) DeleteProjectDomainAttributes(ctx context.Context, project, domain string, rsType admin.MatchableResource) error { + _, err := a.AdminServiceClient().DeleteProjectDomainAttributes(ctx, 
&admin.ProjectDomainAttributesDeleteRequest{ + Project: project, + Domain: domain, + ResourceType: rsType, + }) + return err +} + +func (a *AdminDeleterExtClient) DeleteProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) error { + _, err := a.AdminServiceClient().DeleteProjectAttributes(ctx, &admin.ProjectAttributesDeleteRequest{ + Project: project, + ResourceType: rsType, + }) + return err +} diff --git a/flytectl/pkg/ext/deleter.go b/flytectl/pkg/ext/deleter.go new file mode 100644 index 0000000000..6b848cb278 --- /dev/null +++ b/flytectl/pkg/ext/deleter.go @@ -0,0 +1,36 @@ +package ext + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +//go:generate mockery -all -case=underscore + +// AdminDeleterExtInterface Interface for exposing the update capabilities from the admin +type AdminDeleterExtInterface interface { + AdminServiceClient() service.AdminServiceClient + + // DeleteWorkflowAttributes deletes workflow attributes within a project, domain for a particular matchable resource + DeleteWorkflowAttributes(ctx context.Context, project, domain, name string, rsType admin.MatchableResource) error + + // DeleteProjectDomainAttributes deletes project domain attributes for a particular matchable resource + DeleteProjectDomainAttributes(ctx context.Context, project, domain string, rsType admin.MatchableResource) error + + // DeleteProjectAttributes deletes project attributes for a particular matchable resource + DeleteProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) error +} + +// AdminDeleterExtClient is used for interacting with extended features used for deleting/archiving data in admin service +type AdminDeleterExtClient struct { + AdminClient service.AdminServiceClient +} + +func (a *AdminDeleterExtClient) AdminServiceClient() service.AdminServiceClient { + if a == nil { + return nil 
+ } + return a.AdminClient +} diff --git a/flytectl/pkg/ext/deleter_test.go b/flytectl/pkg/ext/deleter_test.go new file mode 100644 index 0000000000..7a307577bb --- /dev/null +++ b/flytectl/pkg/ext/deleter_test.go @@ -0,0 +1,17 @@ +package ext + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/stretchr/testify/assert" +) + +var deleterFetcherClient *AdminDeleterExtClient + +func TestAdminDeleterExtClient_AdminServiceClient(t *testing.T) { + adminClient = new(mocks.AdminServiceClient) + deleterFetcherClient = nil + client := deleterFetcherClient.AdminServiceClient() + assert.Nil(t, client) +} diff --git a/flytectl/pkg/ext/doc.go b/flytectl/pkg/ext/doc.go new file mode 100644 index 0000000000..aa5073ed52 --- /dev/null +++ b/flytectl/pkg/ext/doc.go @@ -0,0 +1,2 @@ +// Package ext Provides Fetch,Update and Delete extensions to the admin API's whose interface directly relates to flytectl commands +package ext diff --git a/flytectl/pkg/ext/errors.go b/flytectl/pkg/ext/errors.go new file mode 100644 index 0000000000..4f51601a28 --- /dev/null +++ b/flytectl/pkg/ext/errors.go @@ -0,0 +1,24 @@ +package ext + +import ( + "errors" + "fmt" +) + +type NotFoundError struct { + Target string +} + +func (err *NotFoundError) Error() string { + return fmt.Sprintf("%s not found", err.Target) +} + +func NewNotFoundError(targetFormat string, formatArgs ...any) *NotFoundError { + target := fmt.Sprintf(targetFormat, formatArgs...) 
+ return &NotFoundError{target} +} + +func IsNotFoundError(err error) bool { + var notFoundErr *NotFoundError + return errors.As(err, ¬FoundErr) +} diff --git a/flytectl/pkg/ext/execution_fetcher.go b/flytectl/pkg/ext/execution_fetcher.go new file mode 100644 index 0000000000..41ebea9b16 --- /dev/null +++ b/flytectl/pkg/ext/execution_fetcher.go @@ -0,0 +1,86 @@ +package ext + +import ( + "context" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/pkg/filters" +) + +func (a *AdminFetcherExtClient) FetchExecution(ctx context.Context, name, project, domain string) (*admin.Execution, error) { + e, err := a.AdminServiceClient().GetExecution(ctx, &admin.WorkflowExecutionGetRequest{ + Id: &core.WorkflowExecutionIdentifier{ + Project: project, + Domain: domain, + Name: name, + }, + }) + if err != nil { + return nil, err + } + return e, nil +} + +func (a *AdminFetcherExtClient) FetchNodeExecutionData(ctx context.Context, nodeID, execName, project, domain string) (*admin.NodeExecutionGetDataResponse, error) { + ne, err := a.AdminServiceClient().GetNodeExecutionData(ctx, &admin.NodeExecutionGetDataRequest{ + Id: &core.NodeExecutionIdentifier{ + NodeId: nodeID, + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: project, + Domain: domain, + Name: execName, + }, + }, + }) + if err != nil { + return nil, err + } + return ne, nil +} + +func (a *AdminFetcherExtClient) FetchNodeExecutionDetails(ctx context.Context, name, project, domain, uniqueParentID string) (*admin.NodeExecutionList, error) { + ne, err := a.AdminServiceClient().ListNodeExecutions(ctx, &admin.NodeExecutionListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + Project: project, + Domain: domain, + Name: name, + }, + UniqueParentId: uniqueParentID, + Limit: 100, + }) + if err != nil { + return nil, err + } + return ne, nil +} + +func (a *AdminFetcherExtClient) 
FetchTaskExecutionsOnNode(ctx context.Context, nodeID, execName, project, domain string) (*admin.TaskExecutionList, error) { + te, err := a.AdminServiceClient().ListTaskExecutions(ctx, &admin.TaskExecutionListRequest{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + NodeId: nodeID, + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: project, + Domain: domain, + Name: execName, + }, + }, + Limit: 100, + }) + if err != nil { + return nil, err + } + return te, nil +} + +func (a *AdminFetcherExtClient) ListExecution(ctx context.Context, project, domain string, filter filters.Filters) (*admin.ExecutionList, error) { + transformFilters, err := filters.BuildResourceListRequestWithName(filter, project, domain, "") + if err != nil { + return nil, err + } + e, err := a.AdminServiceClient().ListExecutions(ctx, transformFilters) + if err != nil { + return nil, err + } + return e, nil +} diff --git a/flytectl/pkg/ext/execution_fetcher_test.go b/flytectl/pkg/ext/execution_fetcher_test.go new file mode 100644 index 0000000000..304f929880 --- /dev/null +++ b/flytectl/pkg/ext/execution_fetcher_test.go @@ -0,0 +1,111 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var ( + executionResponse *admin.Execution +) + +func getExecutionFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminFetcherExt = AdminFetcherExtClient{AdminClient: adminClient} + projectValue := "dummyProject" + domainValue := "domainValue" + executionNameValue := "execName" + launchPlanNameValue := "launchPlanNameValue" + launchPlanVersionValue := "launchPlanVersionValue" + workflowNameValue := "workflowNameValue" + workflowVersionValue := "workflowVersionValue" + 
executionResponse = &admin.Execution{ + Id: &core.WorkflowExecutionIdentifier{ + Project: projectValue, + Domain: domainValue, + Name: executionNameValue, + }, + Spec: &admin.ExecutionSpec{ + LaunchPlan: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: launchPlanNameValue, + Version: launchPlanVersionValue, + }, + }, + Closure: &admin.ExecutionClosure{ + WorkflowId: &core.Identifier{ + Project: projectValue, + Domain: domainValue, + Name: workflowNameValue, + Version: workflowVersionValue, + }, + Phase: core.WorkflowExecution_SUCCEEDED, + }, + } +} + +func TestFetchExecutionVersion(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnGetExecutionMatch(mock.Anything, mock.Anything).Return(executionResponse, nil) + _, err := adminFetcherExt.FetchExecution(ctx, "execName", "dummyProject", "domainValue") + assert.Nil(t, err) +} + +func TestFetchExecutionError(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnGetExecutionMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchExecution(ctx, "execName", "dummyProject", "domainValue") + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchNodeExecutionDetails(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnListNodeExecutionsMatch(mock.Anything, mock.Anything).Return(&admin.NodeExecutionList{}, nil) + _, err := adminFetcherExt.FetchNodeExecutionDetails(ctx, "execName", "dummyProject", "domainValue", "") + assert.Nil(t, err) +} + +func TestFetchNodeExecutionDetailsError(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnListNodeExecutionsMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchNodeExecutionDetails(ctx, "execName", "dummyProject", "domainValue", "") + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchTaskExecOnNode(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnListTaskExecutionsMatch(mock.Anything, 
mock.Anything).Return(&admin.TaskExecutionList{}, nil) + _, err := adminFetcherExt.FetchTaskExecutionsOnNode(ctx, "nodeId", "execName", "dummyProject", "domainValue") + assert.Nil(t, err) +} + +func TestFetchTaskExecOnNodeError(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnListTaskExecutionsMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchTaskExecutionsOnNode(ctx, "nodeId", "execName", "dummyProject", "domainValue") + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchNodeData(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnGetNodeExecutionDataMatch(mock.Anything, mock.Anything).Return(&admin.NodeExecutionGetDataResponse{}, nil) + _, err := adminFetcherExt.FetchNodeExecutionData(ctx, "nodeId", "execName", "dummyProject", "domainValue") + assert.Nil(t, err) +} + +func TestFetchNodeDataError(t *testing.T) { + getExecutionFetcherSetup() + adminClient.OnGetNodeExecutionDataMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchNodeExecutionData(ctx, "nodeId", "execName", "dummyProject", "domainValue") + assert.Equal(t, fmt.Errorf("failed"), err) +} diff --git a/flytectl/pkg/ext/fetcher.go b/flytectl/pkg/ext/fetcher.go new file mode 100644 index 0000000000..2df2f2799c --- /dev/null +++ b/flytectl/pkg/ext/fetcher.go @@ -0,0 +1,90 @@ +package ext + +import ( + "context" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +//go:generate mockery -all -case=underscore + +// AdminFetcherExtInterface Interface for exposing the fetch capabilities from the admin and also allow this to be injectable into other +// modules. eg : create execution which requires to fetch launchplan details to construct the execution spec. 
+type AdminFetcherExtInterface interface { + AdminServiceClient() service.AdminServiceClient + + // FetchExecution fetches the execution based on name, project, domain + FetchExecution(ctx context.Context, name, project, domain string) (*admin.Execution, error) + + // FetchNodeExecutionDetails fetches the node execution details based on execution name, project, domain, uniqueParentId + FetchNodeExecutionDetails(ctx context.Context, name, project, domain, uniqueParentID string) (*admin.NodeExecutionList, error) + + // FetchNodeExecutionData fetches the node execution data based on nodeId, execution name, project, domain + FetchNodeExecutionData(ctx context.Context, nodeID, execName, project, domain string) (*admin.NodeExecutionGetDataResponse, error) + + // FetchTaskExecutionsOnNode fetches task execution on a node , for give execution name, project, domain + FetchTaskExecutionsOnNode(ctx context.Context, nodeID, execName, project, domain string) (*admin.TaskExecutionList, error) + + // ListExecution fetches the all versions of based on name, project, domain + ListExecution(ctx context.Context, project, domain string, filter filters.Filters) (*admin.ExecutionList, error) + + // FetchAllVerOfLP fetches all versions of launch plan in a project, domain + FetchAllVerOfLP(ctx context.Context, lpName, project, domain string, filter filters.Filters) ([]*admin.LaunchPlan, error) + + // FetchLPLatestVersion fetches latest version of launch plan in a project, domain + FetchLPLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.LaunchPlan, error) + + // FetchLPVersion fetches particular version of launch plan in a project, domain + FetchLPVersion(ctx context.Context, name, version, project, domain string) (*admin.LaunchPlan, error) + + // FetchAllVerOfTask fetches all versions of task in a project, domain + FetchAllVerOfTask(ctx context.Context, name, project, domain string, filter filters.Filters) ([]*admin.Task, error) + + // 
FetchTaskLatestVersion fetches latest version of task in a project, domain + FetchTaskLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.Task, error) + + // FetchTaskVersion fetches particular version of task in a project, domain + FetchTaskVersion(ctx context.Context, name, version, project, domain string) (*admin.Task, error) + + // FetchAllWorkflows fetches all workflows in project domain + FetchAllWorkflows(ctx context.Context, project, domain string, filter filters.Filters) ([]*admin.NamedEntity, error) + + // FetchAllVerOfWorkflow fetches all versions of task in a project, domain + FetchAllVerOfWorkflow(ctx context.Context, name, project, domain string, filter filters.Filters) ([]*admin.Workflow, error) + + // FetchWorkflowLatestVersion fetches latest version of workflow in a project, domain + FetchWorkflowLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.Workflow, error) + + // FetchWorkflowVersion fetches particular version of workflow in a project, domain + FetchWorkflowVersion(ctx context.Context, name, version, project, domain string) (*admin.Workflow, error) + + // FetchWorkflowAttributes fetches workflow attributes particular resource type in a project, domain and workflow + FetchWorkflowAttributes(ctx context.Context, project, domain, name string, rsType admin.MatchableResource) (*admin.WorkflowAttributesGetResponse, error) + + // FetchProjectDomainAttributes fetches project domain attributes particular resource type in a project, domain + FetchProjectDomainAttributes(ctx context.Context, project, domain string, rsType admin.MatchableResource) (*admin.ProjectDomainAttributesGetResponse, error) + + // FetchProjectAttributes fetches project attributes particular resource type in a project + FetchProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) (*admin.ProjectAttributesGetResponse, error) + + // ListProjects fetches all 
projects + ListProjects(ctx context.Context, filter filters.Filters) (*admin.Projects, error) + + // GetProjectByID fetches a single project by its identifier. If project does not exist, an error will be returned + GetProjectByID(ctx context.Context, projectID string) (*admin.Project, error) +} + +// AdminFetcherExtClient is used for interacting with extended features used for fetching data from admin service +type AdminFetcherExtClient struct { + AdminClient service.AdminServiceClient +} + +func (a *AdminFetcherExtClient) AdminServiceClient() service.AdminServiceClient { + if a == nil { + return nil + } + return a.AdminClient +} diff --git a/flytectl/pkg/ext/fetcher_test.go b/flytectl/pkg/ext/fetcher_test.go new file mode 100644 index 0000000000..2654f5b538 --- /dev/null +++ b/flytectl/pkg/ext/fetcher_test.go @@ -0,0 +1,17 @@ +package ext + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/stretchr/testify/assert" +) + +var fetcherClient *AdminFetcherExtClient + +func TestAdminFetcherExtClient_AdminServiceClient(t *testing.T) { + adminClient = new(mocks.AdminServiceClient) + fetcherClient = nil + client := fetcherClient.AdminServiceClient() + assert.Nil(t, client) +} diff --git a/flytectl/pkg/ext/launch_plan_fetcher.go b/flytectl/pkg/ext/launch_plan_fetcher.go new file mode 100644 index 0000000000..76ee524993 --- /dev/null +++ b/flytectl/pkg/ext/launch_plan_fetcher.go @@ -0,0 +1,55 @@ +package ext + +import ( + "context" + "fmt" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +// FetchAllVerOfLP fetches all the versions for give launch plan name +func (a *AdminFetcherExtClient) FetchAllVerOfLP(ctx context.Context, lpName, project, domain string, filter filters.Filters) ([]*admin.LaunchPlan, error) { + transformFilters, err := filters.BuildResourceListRequestWithName(filter, project, 
domain, lpName) + if err != nil { + return nil, err + } + tList, err := a.AdminServiceClient().ListLaunchPlans(ctx, transformFilters) + if err != nil { + return nil, err + } + if len(tList.LaunchPlans) == 0 { + return nil, fmt.Errorf("no launchplans retrieved for %v", lpName) + } + return tList.LaunchPlans, nil +} + +// FetchLPLatestVersion fetches latest version for give launch plan name +func (a *AdminFetcherExtClient) FetchLPLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.LaunchPlan, error) { + // Fetch the latest version of the task. + lpVersions, err := a.FetchAllVerOfLP(ctx, name, project, domain, filter) + if err != nil { + return nil, err + } + lp := lpVersions[0] + return lp, nil +} + +// FetchLPVersion fetches particular version of launch plan +func (a *AdminFetcherExtClient) FetchLPVersion(ctx context.Context, name, version, project, domain string) (*admin.LaunchPlan, error) { + lp, err := a.AdminServiceClient().GetLaunchPlan(ctx, &admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Project: project, + Domain: domain, + Name: name, + Version: version, + }, + }) + if err != nil { + return nil, err + } + return lp, nil +} diff --git a/flytectl/pkg/ext/launch_plan_fetcher_test.go b/flytectl/pkg/ext/launch_plan_fetcher_test.go new file mode 100644 index 0000000000..c2cf3140a4 --- /dev/null +++ b/flytectl/pkg/ext/launch_plan_fetcher_test.go @@ -0,0 +1,171 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + launchPlanListResponse *admin.LaunchPlanList + lpFilters = filters.Filters{} 
+ launchPlan1 *admin.LaunchPlan +) + +func getLaunchPlanFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminFetcherExt = AdminFetcherExtClient{AdminClient: adminClient} + + parameterMap := map[string]*core.Parameter{ + "numbers": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + }, + }, + "numbers_count": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + "run_local_at_count": { + Var: &core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + Behavior: &core.Parameter_Default{ + Default: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 10, + }, + }, + }, + }, + }, + }, + }, + }, + } + launchPlan1 = &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: "launchplan1", + Version: "v1", + }, + Spec: &admin.LaunchPlanSpec{ + DefaultInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + Closure: &admin.LaunchPlanClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + ExpectedInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + } + launchPlan2 := &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: "launchplan1", + Version: "v2", + }, + Spec: &admin.LaunchPlanSpec{ + DefaultInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + Closure: &admin.LaunchPlanClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + ExpectedInputs: &core.ParameterMap{ + Parameters: parameterMap, + }, + }, + } + + launchPlans := []*admin.LaunchPlan{launchPlan2, launchPlan1} + + launchPlanListResponse = &admin.LaunchPlanList{ + LaunchPlans: 
launchPlans, + } +} + +func TestFetchAllVerOfLP(t *testing.T) { + getLaunchPlanFetcherSetup() + adminClient.OnListLaunchPlansMatch(mock.Anything, mock.Anything).Return(launchPlanListResponse, nil) + _, err := adminFetcherExt.FetchAllVerOfLP(ctx, "lpName", "project", "domain", lpFilters) + assert.Nil(t, err) +} + +func TestFetchLPVersion(t *testing.T) { + getLaunchPlanFetcherSetup() + adminClient.OnGetLaunchPlanMatch(mock.Anything, mock.Anything).Return(launchPlan1, nil) + _, err := adminFetcherExt.FetchLPVersion(ctx, "launchplan1", "v1", "project", "domain") + assert.Nil(t, err) +} + +func TestFetchAllVerOfLPError(t *testing.T) { + getLaunchPlanFetcherSetup() + adminClient.OnListLaunchPlansMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchAllVerOfLP(ctx, "lpName", "project", "domain", lpFilters) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchAllVerOfLPFilterError(t *testing.T) { + getLaunchPlanFetcherSetup() + lpFilters.FieldSelector = "hello=" + adminClient.OnListLaunchPlansMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("Please add a valid field selector")) + _, err := adminFetcherExt.FetchAllVerOfLP(ctx, "lpName", "project", "domain", lpFilters) + assert.Equal(t, fmt.Errorf("Please add a valid field selector"), err) +} + +func TestFetchAllVerOfLPEmptyResponse(t *testing.T) { + launchPlanListResponse := &admin.LaunchPlanList{} + getLaunchPlanFetcherSetup() + lpFilters.FieldSelector = "" + adminClient.OnListLaunchPlansMatch(mock.Anything, mock.Anything).Return(launchPlanListResponse, nil) + _, err := adminFetcherExt.FetchAllVerOfLP(ctx, "lpName", "project", "domain", lpFilters) + assert.Equal(t, fmt.Errorf("no launchplans retrieved for lpName"), err) +} + +func TestFetchLPLatestVersion(t *testing.T) { + getLaunchPlanFetcherSetup() + adminClient.OnListLaunchPlansMatch(mock.Anything, mock.Anything).Return(launchPlanListResponse, nil) + _, err := 
adminFetcherExt.FetchLPLatestVersion(ctx, "lpName", "project", "domain", lpFilters) + assert.Nil(t, err) +} + +func TestFetchLPLatestVersionError(t *testing.T) { + launchPlanListResponse := &admin.LaunchPlanList{} + getLaunchPlanFetcherSetup() + adminClient.OnListLaunchPlansMatch(mock.Anything, mock.Anything).Return(launchPlanListResponse, nil) + _, err := adminFetcherExt.FetchLPLatestVersion(ctx, "lpName", "project", "domain", lpFilters) + assert.Equal(t, fmt.Errorf("no launchplans retrieved for lpName"), err) +} diff --git a/flytectl/pkg/ext/mocks/admin_deleter_ext_interface.go b/flytectl/pkg/ext/mocks/admin_deleter_ext_interface.go new file mode 100644 index 0000000000..c165501cd5 --- /dev/null +++ b/flytectl/pkg/ext/mocks/admin_deleter_ext_interface.go @@ -0,0 +1,148 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + mock "github.com/stretchr/testify/mock" + + service "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +// AdminDeleterExtInterface is an autogenerated mock type for the AdminDeleterExtInterface type +type AdminDeleterExtInterface struct { + mock.Mock +} + +type AdminDeleterExtInterface_AdminServiceClient struct { + *mock.Call +} + +func (_m AdminDeleterExtInterface_AdminServiceClient) Return(_a0 service.AdminServiceClient) *AdminDeleterExtInterface_AdminServiceClient { + return &AdminDeleterExtInterface_AdminServiceClient{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminDeleterExtInterface) OnAdminServiceClient() *AdminDeleterExtInterface_AdminServiceClient { + c_call := _m.On("AdminServiceClient") + return &AdminDeleterExtInterface_AdminServiceClient{Call: c_call} +} + +func (_m *AdminDeleterExtInterface) OnAdminServiceClientMatch(matchers ...interface{}) *AdminDeleterExtInterface_AdminServiceClient { + c_call := _m.On("AdminServiceClient", matchers...) 
+ return &AdminDeleterExtInterface_AdminServiceClient{Call: c_call} +} + +// AdminServiceClient provides a mock function with given fields: +func (_m *AdminDeleterExtInterface) AdminServiceClient() service.AdminServiceClient { + ret := _m.Called() + + var r0 service.AdminServiceClient + if rf, ok := ret.Get(0).(func() service.AdminServiceClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(service.AdminServiceClient) + } + } + + return r0 +} + +type AdminDeleterExtInterface_DeleteProjectAttributes struct { + *mock.Call +} + +func (_m AdminDeleterExtInterface_DeleteProjectAttributes) Return(_a0 error) *AdminDeleterExtInterface_DeleteProjectAttributes { + return &AdminDeleterExtInterface_DeleteProjectAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminDeleterExtInterface) OnDeleteProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) *AdminDeleterExtInterface_DeleteProjectAttributes { + c_call := _m.On("DeleteProjectAttributes", ctx, project, rsType) + return &AdminDeleterExtInterface_DeleteProjectAttributes{Call: c_call} +} + +func (_m *AdminDeleterExtInterface) OnDeleteProjectAttributesMatch(matchers ...interface{}) *AdminDeleterExtInterface_DeleteProjectAttributes { + c_call := _m.On("DeleteProjectAttributes", matchers...) 
+ return &AdminDeleterExtInterface_DeleteProjectAttributes{Call: c_call} +} + +// DeleteProjectAttributes provides a mock function with given fields: ctx, project, rsType +func (_m *AdminDeleterExtInterface) DeleteProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) error { + ret := _m.Called(ctx, project, rsType) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, admin.MatchableResource) error); ok { + r0 = rf(ctx, project, rsType) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type AdminDeleterExtInterface_DeleteProjectDomainAttributes struct { + *mock.Call +} + +func (_m AdminDeleterExtInterface_DeleteProjectDomainAttributes) Return(_a0 error) *AdminDeleterExtInterface_DeleteProjectDomainAttributes { + return &AdminDeleterExtInterface_DeleteProjectDomainAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminDeleterExtInterface) OnDeleteProjectDomainAttributes(ctx context.Context, project string, domain string, rsType admin.MatchableResource) *AdminDeleterExtInterface_DeleteProjectDomainAttributes { + c_call := _m.On("DeleteProjectDomainAttributes", ctx, project, domain, rsType) + return &AdminDeleterExtInterface_DeleteProjectDomainAttributes{Call: c_call} +} + +func (_m *AdminDeleterExtInterface) OnDeleteProjectDomainAttributesMatch(matchers ...interface{}) *AdminDeleterExtInterface_DeleteProjectDomainAttributes { + c_call := _m.On("DeleteProjectDomainAttributes", matchers...) 
+ return &AdminDeleterExtInterface_DeleteProjectDomainAttributes{Call: c_call} +} + +// DeleteProjectDomainAttributes provides a mock function with given fields: ctx, project, domain, rsType +func (_m *AdminDeleterExtInterface) DeleteProjectDomainAttributes(ctx context.Context, project string, domain string, rsType admin.MatchableResource) error { + ret := _m.Called(ctx, project, domain, rsType) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, admin.MatchableResource) error); ok { + r0 = rf(ctx, project, domain, rsType) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type AdminDeleterExtInterface_DeleteWorkflowAttributes struct { + *mock.Call +} + +func (_m AdminDeleterExtInterface_DeleteWorkflowAttributes) Return(_a0 error) *AdminDeleterExtInterface_DeleteWorkflowAttributes { + return &AdminDeleterExtInterface_DeleteWorkflowAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminDeleterExtInterface) OnDeleteWorkflowAttributes(ctx context.Context, project string, domain string, name string, rsType admin.MatchableResource) *AdminDeleterExtInterface_DeleteWorkflowAttributes { + c_call := _m.On("DeleteWorkflowAttributes", ctx, project, domain, name, rsType) + return &AdminDeleterExtInterface_DeleteWorkflowAttributes{Call: c_call} +} + +func (_m *AdminDeleterExtInterface) OnDeleteWorkflowAttributesMatch(matchers ...interface{}) *AdminDeleterExtInterface_DeleteWorkflowAttributes { + c_call := _m.On("DeleteWorkflowAttributes", matchers...) 
+ return &AdminDeleterExtInterface_DeleteWorkflowAttributes{Call: c_call} +} + +// DeleteWorkflowAttributes provides a mock function with given fields: ctx, project, domain, name, rsType +func (_m *AdminDeleterExtInterface) DeleteWorkflowAttributes(ctx context.Context, project string, domain string, name string, rsType admin.MatchableResource) error { + ret := _m.Called(ctx, project, domain, name, rsType) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, admin.MatchableResource) error); ok { + r0 = rf(ctx, project, domain, name, rsType) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flytectl/pkg/ext/mocks/admin_fetcher_ext_interface.go b/flytectl/pkg/ext/mocks/admin_fetcher_ext_interface.go new file mode 100644 index 0000000000..b2e9814fb5 --- /dev/null +++ b/flytectl/pkg/ext/mocks/admin_fetcher_ext_interface.go @@ -0,0 +1,874 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + filters "github.com/flyteorg/flytectl/pkg/filters" + + mock "github.com/stretchr/testify/mock" + + service "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +// AdminFetcherExtInterface is an autogenerated mock type for the AdminFetcherExtInterface type +type AdminFetcherExtInterface struct { + mock.Mock +} + +type AdminFetcherExtInterface_AdminServiceClient struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_AdminServiceClient) Return(_a0 service.AdminServiceClient) *AdminFetcherExtInterface_AdminServiceClient { + return &AdminFetcherExtInterface_AdminServiceClient{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminFetcherExtInterface) OnAdminServiceClient() *AdminFetcherExtInterface_AdminServiceClient { + c_call := _m.On("AdminServiceClient") + return &AdminFetcherExtInterface_AdminServiceClient{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) 
OnAdminServiceClientMatch(matchers ...interface{}) *AdminFetcherExtInterface_AdminServiceClient { + c_call := _m.On("AdminServiceClient", matchers...) + return &AdminFetcherExtInterface_AdminServiceClient{Call: c_call} +} + +// AdminServiceClient provides a mock function with given fields: +func (_m *AdminFetcherExtInterface) AdminServiceClient() service.AdminServiceClient { + ret := _m.Called() + + var r0 service.AdminServiceClient + if rf, ok := ret.Get(0).(func() service.AdminServiceClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(service.AdminServiceClient) + } + } + + return r0 +} + +type AdminFetcherExtInterface_FetchAllVerOfLP struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchAllVerOfLP) Return(_a0 []*admin.LaunchPlan, _a1 error) *AdminFetcherExtInterface_FetchAllVerOfLP { + return &AdminFetcherExtInterface_FetchAllVerOfLP{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllVerOfLP(ctx context.Context, lpName string, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchAllVerOfLP { + c_call := _m.On("FetchAllVerOfLP", ctx, lpName, project, domain, filter) + return &AdminFetcherExtInterface_FetchAllVerOfLP{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllVerOfLPMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchAllVerOfLP { + c_call := _m.On("FetchAllVerOfLP", matchers...) 
+ return &AdminFetcherExtInterface_FetchAllVerOfLP{Call: c_call} +} + +// FetchAllVerOfLP provides a mock function with given fields: ctx, lpName, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchAllVerOfLP(ctx context.Context, lpName string, project string, domain string, filter filters.Filters) ([]*admin.LaunchPlan, error) { + ret := _m.Called(ctx, lpName, project, domain, filter) + + var r0 []*admin.LaunchPlan + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, filters.Filters) []*admin.LaunchPlan); ok { + r0 = rf(ctx, lpName, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*admin.LaunchPlan) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, filters.Filters) error); ok { + r1 = rf(ctx, lpName, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchAllVerOfTask struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchAllVerOfTask) Return(_a0 []*admin.Task, _a1 error) *AdminFetcherExtInterface_FetchAllVerOfTask { + return &AdminFetcherExtInterface_FetchAllVerOfTask{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllVerOfTask(ctx context.Context, name string, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchAllVerOfTask { + c_call := _m.On("FetchAllVerOfTask", ctx, name, project, domain, filter) + return &AdminFetcherExtInterface_FetchAllVerOfTask{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllVerOfTaskMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchAllVerOfTask { + c_call := _m.On("FetchAllVerOfTask", matchers...) 
+ return &AdminFetcherExtInterface_FetchAllVerOfTask{Call: c_call} +} + +// FetchAllVerOfTask provides a mock function with given fields: ctx, name, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchAllVerOfTask(ctx context.Context, name string, project string, domain string, filter filters.Filters) ([]*admin.Task, error) { + ret := _m.Called(ctx, name, project, domain, filter) + + var r0 []*admin.Task + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, filters.Filters) []*admin.Task); ok { + r0 = rf(ctx, name, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*admin.Task) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, filters.Filters) error); ok { + r1 = rf(ctx, name, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchAllVerOfWorkflow struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchAllVerOfWorkflow) Return(_a0 []*admin.Workflow, _a1 error) *AdminFetcherExtInterface_FetchAllVerOfWorkflow { + return &AdminFetcherExtInterface_FetchAllVerOfWorkflow{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllVerOfWorkflow(ctx context.Context, name string, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchAllVerOfWorkflow { + c_call := _m.On("FetchAllVerOfWorkflow", ctx, name, project, domain, filter) + return &AdminFetcherExtInterface_FetchAllVerOfWorkflow{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllVerOfWorkflowMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchAllVerOfWorkflow { + c_call := _m.On("FetchAllVerOfWorkflow", matchers...) 
+ return &AdminFetcherExtInterface_FetchAllVerOfWorkflow{Call: c_call} +} + +// FetchAllVerOfWorkflow provides a mock function with given fields: ctx, name, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchAllVerOfWorkflow(ctx context.Context, name string, project string, domain string, filter filters.Filters) ([]*admin.Workflow, error) { + ret := _m.Called(ctx, name, project, domain, filter) + + var r0 []*admin.Workflow + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, filters.Filters) []*admin.Workflow); ok { + r0 = rf(ctx, name, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*admin.Workflow) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, filters.Filters) error); ok { + r1 = rf(ctx, name, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchAllWorkflows struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchAllWorkflows) Return(_a0 []*admin.NamedEntity, _a1 error) *AdminFetcherExtInterface_FetchAllWorkflows { + return &AdminFetcherExtInterface_FetchAllWorkflows{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllWorkflows(ctx context.Context, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchAllWorkflows { + c_call := _m.On("FetchAllWorkflows", ctx, project, domain, filter) + return &AdminFetcherExtInterface_FetchAllWorkflows{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchAllWorkflowsMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchAllWorkflows { + c_call := _m.On("FetchAllWorkflows", matchers...) 
+ return &AdminFetcherExtInterface_FetchAllWorkflows{Call: c_call} +} + +// FetchAllWorkflows provides a mock function with given fields: ctx, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchAllWorkflows(ctx context.Context, project string, domain string, filter filters.Filters) ([]*admin.NamedEntity, error) { + ret := _m.Called(ctx, project, domain, filter) + + var r0 []*admin.NamedEntity + if rf, ok := ret.Get(0).(func(context.Context, string, string, filters.Filters) []*admin.NamedEntity); ok { + r0 = rf(ctx, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*admin.NamedEntity) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, filters.Filters) error); ok { + r1 = rf(ctx, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchExecution struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchExecution) Return(_a0 *admin.Execution, _a1 error) *AdminFetcherExtInterface_FetchExecution { + return &AdminFetcherExtInterface_FetchExecution{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchExecution(ctx context.Context, name string, project string, domain string) *AdminFetcherExtInterface_FetchExecution { + c_call := _m.On("FetchExecution", ctx, name, project, domain) + return &AdminFetcherExtInterface_FetchExecution{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchExecutionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchExecution { + c_call := _m.On("FetchExecution", matchers...) 
+ return &AdminFetcherExtInterface_FetchExecution{Call: c_call} +} + +// FetchExecution provides a mock function with given fields: ctx, name, project, domain +func (_m *AdminFetcherExtInterface) FetchExecution(ctx context.Context, name string, project string, domain string) (*admin.Execution, error) { + ret := _m.Called(ctx, name, project, domain) + + var r0 *admin.Execution + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *admin.Execution); ok { + r0 = rf(ctx, name, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Execution) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, name, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchLPLatestVersion struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchLPLatestVersion) Return(_a0 *admin.LaunchPlan, _a1 error) *AdminFetcherExtInterface_FetchLPLatestVersion { + return &AdminFetcherExtInterface_FetchLPLatestVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchLPLatestVersion(ctx context.Context, name string, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchLPLatestVersion { + c_call := _m.On("FetchLPLatestVersion", ctx, name, project, domain, filter) + return &AdminFetcherExtInterface_FetchLPLatestVersion{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchLPLatestVersionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchLPLatestVersion { + c_call := _m.On("FetchLPLatestVersion", matchers...) 
+ return &AdminFetcherExtInterface_FetchLPLatestVersion{Call: c_call} +} + +// FetchLPLatestVersion provides a mock function with given fields: ctx, name, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchLPLatestVersion(ctx context.Context, name string, project string, domain string, filter filters.Filters) (*admin.LaunchPlan, error) { + ret := _m.Called(ctx, name, project, domain, filter) + + var r0 *admin.LaunchPlan + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, filters.Filters) *admin.LaunchPlan); ok { + r0 = rf(ctx, name, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.LaunchPlan) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, filters.Filters) error); ok { + r1 = rf(ctx, name, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchLPVersion struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchLPVersion) Return(_a0 *admin.LaunchPlan, _a1 error) *AdminFetcherExtInterface_FetchLPVersion { + return &AdminFetcherExtInterface_FetchLPVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchLPVersion(ctx context.Context, name string, version string, project string, domain string) *AdminFetcherExtInterface_FetchLPVersion { + c_call := _m.On("FetchLPVersion", ctx, name, version, project, domain) + return &AdminFetcherExtInterface_FetchLPVersion{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchLPVersionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchLPVersion { + c_call := _m.On("FetchLPVersion", matchers...) 
+ return &AdminFetcherExtInterface_FetchLPVersion{Call: c_call} +} + +// FetchLPVersion provides a mock function with given fields: ctx, name, version, project, domain +func (_m *AdminFetcherExtInterface) FetchLPVersion(ctx context.Context, name string, version string, project string, domain string) (*admin.LaunchPlan, error) { + ret := _m.Called(ctx, name, version, project, domain) + + var r0 *admin.LaunchPlan + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.LaunchPlan); ok { + r0 = rf(ctx, name, version, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.LaunchPlan) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, name, version, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchNodeExecutionData struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchNodeExecutionData) Return(_a0 *admin.NodeExecutionGetDataResponse, _a1 error) *AdminFetcherExtInterface_FetchNodeExecutionData { + return &AdminFetcherExtInterface_FetchNodeExecutionData{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchNodeExecutionData(ctx context.Context, nodeID string, execName string, project string, domain string) *AdminFetcherExtInterface_FetchNodeExecutionData { + c_call := _m.On("FetchNodeExecutionData", ctx, nodeID, execName, project, domain) + return &AdminFetcherExtInterface_FetchNodeExecutionData{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchNodeExecutionDataMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchNodeExecutionData { + c_call := _m.On("FetchNodeExecutionData", matchers...) 
+ return &AdminFetcherExtInterface_FetchNodeExecutionData{Call: c_call} +} + +// FetchNodeExecutionData provides a mock function with given fields: ctx, nodeID, execName, project, domain +func (_m *AdminFetcherExtInterface) FetchNodeExecutionData(ctx context.Context, nodeID string, execName string, project string, domain string) (*admin.NodeExecutionGetDataResponse, error) { + ret := _m.Called(ctx, nodeID, execName, project, domain) + + var r0 *admin.NodeExecutionGetDataResponse + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.NodeExecutionGetDataResponse); ok { + r0 = rf(ctx, nodeID, execName, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.NodeExecutionGetDataResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, nodeID, execName, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchNodeExecutionDetails struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchNodeExecutionDetails) Return(_a0 *admin.NodeExecutionList, _a1 error) *AdminFetcherExtInterface_FetchNodeExecutionDetails { + return &AdminFetcherExtInterface_FetchNodeExecutionDetails{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchNodeExecutionDetails(ctx context.Context, name string, project string, domain string, uniqueParentID string) *AdminFetcherExtInterface_FetchNodeExecutionDetails { + c_call := _m.On("FetchNodeExecutionDetails", ctx, name, project, domain, uniqueParentID) + return &AdminFetcherExtInterface_FetchNodeExecutionDetails{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchNodeExecutionDetailsMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchNodeExecutionDetails { + c_call := _m.On("FetchNodeExecutionDetails", matchers...) 
+ return &AdminFetcherExtInterface_FetchNodeExecutionDetails{Call: c_call} +} + +// FetchNodeExecutionDetails provides a mock function with given fields: ctx, name, project, domain, uniqueParentID +func (_m *AdminFetcherExtInterface) FetchNodeExecutionDetails(ctx context.Context, name string, project string, domain string, uniqueParentID string) (*admin.NodeExecutionList, error) { + ret := _m.Called(ctx, name, project, domain, uniqueParentID) + + var r0 *admin.NodeExecutionList + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.NodeExecutionList); ok { + r0 = rf(ctx, name, project, domain, uniqueParentID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.NodeExecutionList) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, name, project, domain, uniqueParentID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchProjectAttributes struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchProjectAttributes) Return(_a0 *admin.ProjectAttributesGetResponse, _a1 error) *AdminFetcherExtInterface_FetchProjectAttributes { + return &AdminFetcherExtInterface_FetchProjectAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) *AdminFetcherExtInterface_FetchProjectAttributes { + c_call := _m.On("FetchProjectAttributes", ctx, project, rsType) + return &AdminFetcherExtInterface_FetchProjectAttributes{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchProjectAttributesMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchProjectAttributes { + c_call := _m.On("FetchProjectAttributes", matchers...) 
+ return &AdminFetcherExtInterface_FetchProjectAttributes{Call: c_call} +} + +// FetchProjectAttributes provides a mock function with given fields: ctx, project, rsType +func (_m *AdminFetcherExtInterface) FetchProjectAttributes(ctx context.Context, project string, rsType admin.MatchableResource) (*admin.ProjectAttributesGetResponse, error) { + ret := _m.Called(ctx, project, rsType) + + var r0 *admin.ProjectAttributesGetResponse + if rf, ok := ret.Get(0).(func(context.Context, string, admin.MatchableResource) *admin.ProjectAttributesGetResponse); ok { + r0 = rf(ctx, project, rsType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectAttributesGetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, admin.MatchableResource) error); ok { + r1 = rf(ctx, project, rsType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchProjectDomainAttributes struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchProjectDomainAttributes) Return(_a0 *admin.ProjectDomainAttributesGetResponse, _a1 error) *AdminFetcherExtInterface_FetchProjectDomainAttributes { + return &AdminFetcherExtInterface_FetchProjectDomainAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchProjectDomainAttributes(ctx context.Context, project string, domain string, rsType admin.MatchableResource) *AdminFetcherExtInterface_FetchProjectDomainAttributes { + c_call := _m.On("FetchProjectDomainAttributes", ctx, project, domain, rsType) + return &AdminFetcherExtInterface_FetchProjectDomainAttributes{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchProjectDomainAttributesMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchProjectDomainAttributes { + c_call := _m.On("FetchProjectDomainAttributes", matchers...) 
+ return &AdminFetcherExtInterface_FetchProjectDomainAttributes{Call: c_call} +} + +// FetchProjectDomainAttributes provides a mock function with given fields: ctx, project, domain, rsType +func (_m *AdminFetcherExtInterface) FetchProjectDomainAttributes(ctx context.Context, project string, domain string, rsType admin.MatchableResource) (*admin.ProjectDomainAttributesGetResponse, error) { + ret := _m.Called(ctx, project, domain, rsType) + + var r0 *admin.ProjectDomainAttributesGetResponse + if rf, ok := ret.Get(0).(func(context.Context, string, string, admin.MatchableResource) *admin.ProjectDomainAttributesGetResponse); ok { + r0 = rf(ctx, project, domain, rsType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectDomainAttributesGetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, admin.MatchableResource) error); ok { + r1 = rf(ctx, project, domain, rsType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchTaskExecutionsOnNode struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchTaskExecutionsOnNode) Return(_a0 *admin.TaskExecutionList, _a1 error) *AdminFetcherExtInterface_FetchTaskExecutionsOnNode { + return &AdminFetcherExtInterface_FetchTaskExecutionsOnNode{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchTaskExecutionsOnNode(ctx context.Context, nodeID string, execName string, project string, domain string) *AdminFetcherExtInterface_FetchTaskExecutionsOnNode { + c_call := _m.On("FetchTaskExecutionsOnNode", ctx, nodeID, execName, project, domain) + return &AdminFetcherExtInterface_FetchTaskExecutionsOnNode{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchTaskExecutionsOnNodeMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchTaskExecutionsOnNode { + c_call := _m.On("FetchTaskExecutionsOnNode", matchers...) 
+ return &AdminFetcherExtInterface_FetchTaskExecutionsOnNode{Call: c_call} +} + +// FetchTaskExecutionsOnNode provides a mock function with given fields: ctx, nodeID, execName, project, domain +func (_m *AdminFetcherExtInterface) FetchTaskExecutionsOnNode(ctx context.Context, nodeID string, execName string, project string, domain string) (*admin.TaskExecutionList, error) { + ret := _m.Called(ctx, nodeID, execName, project, domain) + + var r0 *admin.TaskExecutionList + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.TaskExecutionList); ok { + r0 = rf(ctx, nodeID, execName, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.TaskExecutionList) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, nodeID, execName, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchTaskLatestVersion struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchTaskLatestVersion) Return(_a0 *admin.Task, _a1 error) *AdminFetcherExtInterface_FetchTaskLatestVersion { + return &AdminFetcherExtInterface_FetchTaskLatestVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchTaskLatestVersion(ctx context.Context, name string, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchTaskLatestVersion { + c_call := _m.On("FetchTaskLatestVersion", ctx, name, project, domain, filter) + return &AdminFetcherExtInterface_FetchTaskLatestVersion{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchTaskLatestVersionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchTaskLatestVersion { + c_call := _m.On("FetchTaskLatestVersion", matchers...) 
+ return &AdminFetcherExtInterface_FetchTaskLatestVersion{Call: c_call} +} + +// FetchTaskLatestVersion provides a mock function with given fields: ctx, name, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchTaskLatestVersion(ctx context.Context, name string, project string, domain string, filter filters.Filters) (*admin.Task, error) { + ret := _m.Called(ctx, name, project, domain, filter) + + var r0 *admin.Task + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, filters.Filters) *admin.Task); ok { + r0 = rf(ctx, name, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Task) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, filters.Filters) error); ok { + r1 = rf(ctx, name, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchTaskVersion struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchTaskVersion) Return(_a0 *admin.Task, _a1 error) *AdminFetcherExtInterface_FetchTaskVersion { + return &AdminFetcherExtInterface_FetchTaskVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchTaskVersion(ctx context.Context, name string, version string, project string, domain string) *AdminFetcherExtInterface_FetchTaskVersion { + c_call := _m.On("FetchTaskVersion", ctx, name, version, project, domain) + return &AdminFetcherExtInterface_FetchTaskVersion{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchTaskVersionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchTaskVersion { + c_call := _m.On("FetchTaskVersion", matchers...) 
+ return &AdminFetcherExtInterface_FetchTaskVersion{Call: c_call} +} + +// FetchTaskVersion provides a mock function with given fields: ctx, name, version, project, domain +func (_m *AdminFetcherExtInterface) FetchTaskVersion(ctx context.Context, name string, version string, project string, domain string) (*admin.Task, error) { + ret := _m.Called(ctx, name, version, project, domain) + + var r0 *admin.Task + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.Task); ok { + r0 = rf(ctx, name, version, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Task) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, name, version, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchWorkflowAttributes struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchWorkflowAttributes) Return(_a0 *admin.WorkflowAttributesGetResponse, _a1 error) *AdminFetcherExtInterface_FetchWorkflowAttributes { + return &AdminFetcherExtInterface_FetchWorkflowAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchWorkflowAttributes(ctx context.Context, project string, domain string, name string, rsType admin.MatchableResource) *AdminFetcherExtInterface_FetchWorkflowAttributes { + c_call := _m.On("FetchWorkflowAttributes", ctx, project, domain, name, rsType) + return &AdminFetcherExtInterface_FetchWorkflowAttributes{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchWorkflowAttributesMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchWorkflowAttributes { + c_call := _m.On("FetchWorkflowAttributes", matchers...) 
+ return &AdminFetcherExtInterface_FetchWorkflowAttributes{Call: c_call} +} + +// FetchWorkflowAttributes provides a mock function with given fields: ctx, project, domain, name, rsType +func (_m *AdminFetcherExtInterface) FetchWorkflowAttributes(ctx context.Context, project string, domain string, name string, rsType admin.MatchableResource) (*admin.WorkflowAttributesGetResponse, error) { + ret := _m.Called(ctx, project, domain, name, rsType) + + var r0 *admin.WorkflowAttributesGetResponse + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, admin.MatchableResource) *admin.WorkflowAttributesGetResponse); ok { + r0 = rf(ctx, project, domain, name, rsType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.WorkflowAttributesGetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, admin.MatchableResource) error); ok { + r1 = rf(ctx, project, domain, name, rsType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchWorkflowLatestVersion struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchWorkflowLatestVersion) Return(_a0 *admin.Workflow, _a1 error) *AdminFetcherExtInterface_FetchWorkflowLatestVersion { + return &AdminFetcherExtInterface_FetchWorkflowLatestVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchWorkflowLatestVersion(ctx context.Context, name string, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_FetchWorkflowLatestVersion { + c_call := _m.On("FetchWorkflowLatestVersion", ctx, name, project, domain, filter) + return &AdminFetcherExtInterface_FetchWorkflowLatestVersion{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchWorkflowLatestVersionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchWorkflowLatestVersion { + c_call := _m.On("FetchWorkflowLatestVersion", matchers...) 
+ return &AdminFetcherExtInterface_FetchWorkflowLatestVersion{Call: c_call} +} + +// FetchWorkflowLatestVersion provides a mock function with given fields: ctx, name, project, domain, filter +func (_m *AdminFetcherExtInterface) FetchWorkflowLatestVersion(ctx context.Context, name string, project string, domain string, filter filters.Filters) (*admin.Workflow, error) { + ret := _m.Called(ctx, name, project, domain, filter) + + var r0 *admin.Workflow + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, filters.Filters) *admin.Workflow); ok { + r0 = rf(ctx, name, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Workflow) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, filters.Filters) error); ok { + r1 = rf(ctx, name, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_FetchWorkflowVersion struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_FetchWorkflowVersion) Return(_a0 *admin.Workflow, _a1 error) *AdminFetcherExtInterface_FetchWorkflowVersion { + return &AdminFetcherExtInterface_FetchWorkflowVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnFetchWorkflowVersion(ctx context.Context, name string, version string, project string, domain string) *AdminFetcherExtInterface_FetchWorkflowVersion { + c_call := _m.On("FetchWorkflowVersion", ctx, name, version, project, domain) + return &AdminFetcherExtInterface_FetchWorkflowVersion{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnFetchWorkflowVersionMatch(matchers ...interface{}) *AdminFetcherExtInterface_FetchWorkflowVersion { + c_call := _m.On("FetchWorkflowVersion", matchers...) 
+ return &AdminFetcherExtInterface_FetchWorkflowVersion{Call: c_call} +} + +// FetchWorkflowVersion provides a mock function with given fields: ctx, name, version, project, domain +func (_m *AdminFetcherExtInterface) FetchWorkflowVersion(ctx context.Context, name string, version string, project string, domain string) (*admin.Workflow, error) { + ret := _m.Called(ctx, name, version, project, domain) + + var r0 *admin.Workflow + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.Workflow); ok { + r0 = rf(ctx, name, version, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Workflow) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, name, version, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_GetProjectByID struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_GetProjectByID) Return(_a0 *admin.Project, _a1 error) *AdminFetcherExtInterface_GetProjectByID { + return &AdminFetcherExtInterface_GetProjectByID{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnGetProjectByID(ctx context.Context, projectID string) *AdminFetcherExtInterface_GetProjectByID { + c_call := _m.On("GetProjectByID", ctx, projectID) + return &AdminFetcherExtInterface_GetProjectByID{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnGetProjectByIDMatch(matchers ...interface{}) *AdminFetcherExtInterface_GetProjectByID { + c_call := _m.On("GetProjectByID", matchers...) 
+ return &AdminFetcherExtInterface_GetProjectByID{Call: c_call} +} + +// GetProjectByID provides a mock function with given fields: ctx, projectID +func (_m *AdminFetcherExtInterface) GetProjectByID(ctx context.Context, projectID string) (*admin.Project, error) { + ret := _m.Called(ctx, projectID) + + var r0 *admin.Project + if rf, ok := ret.Get(0).(func(context.Context, string) *admin.Project); ok { + r0 = rf(ctx, projectID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Project) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, projectID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_ListExecution struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_ListExecution) Return(_a0 *admin.ExecutionList, _a1 error) *AdminFetcherExtInterface_ListExecution { + return &AdminFetcherExtInterface_ListExecution{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnListExecution(ctx context.Context, project string, domain string, filter filters.Filters) *AdminFetcherExtInterface_ListExecution { + c_call := _m.On("ListExecution", ctx, project, domain, filter) + return &AdminFetcherExtInterface_ListExecution{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnListExecutionMatch(matchers ...interface{}) *AdminFetcherExtInterface_ListExecution { + c_call := _m.On("ListExecution", matchers...) 
+ return &AdminFetcherExtInterface_ListExecution{Call: c_call} +} + +// ListExecution provides a mock function with given fields: ctx, project, domain, filter +func (_m *AdminFetcherExtInterface) ListExecution(ctx context.Context, project string, domain string, filter filters.Filters) (*admin.ExecutionList, error) { + ret := _m.Called(ctx, project, domain, filter) + + var r0 *admin.ExecutionList + if rf, ok := ret.Get(0).(func(context.Context, string, string, filters.Filters) *admin.ExecutionList); ok { + r0 = rf(ctx, project, domain, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ExecutionList) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, filters.Filters) error); ok { + r1 = rf(ctx, project, domain, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminFetcherExtInterface_ListProjects struct { + *mock.Call +} + +func (_m AdminFetcherExtInterface_ListProjects) Return(_a0 *admin.Projects, _a1 error) *AdminFetcherExtInterface_ListProjects { + return &AdminFetcherExtInterface_ListProjects{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminFetcherExtInterface) OnListProjects(ctx context.Context, filter filters.Filters) *AdminFetcherExtInterface_ListProjects { + c_call := _m.On("ListProjects", ctx, filter) + return &AdminFetcherExtInterface_ListProjects{Call: c_call} +} + +func (_m *AdminFetcherExtInterface) OnListProjectsMatch(matchers ...interface{}) *AdminFetcherExtInterface_ListProjects { + c_call := _m.On("ListProjects", matchers...) 
+ return &AdminFetcherExtInterface_ListProjects{Call: c_call} +} + +// ListProjects provides a mock function with given fields: ctx, filter +func (_m *AdminFetcherExtInterface) ListProjects(ctx context.Context, filter filters.Filters) (*admin.Projects, error) { + ret := _m.Called(ctx, filter) + + var r0 *admin.Projects + if rf, ok := ret.Get(0).(func(context.Context, filters.Filters) *admin.Projects); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Projects) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, filters.Filters) error); ok { + r1 = rf(ctx, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flytectl/pkg/ext/mocks/admin_service_fetcher_ext_interface.go b/flytectl/pkg/ext/mocks/admin_service_fetcher_ext_interface.go new file mode 100644 index 0000000000..74b08a115a --- /dev/null +++ b/flytectl/pkg/ext/mocks/admin_service_fetcher_ext_interface.go @@ -0,0 +1,339 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + mock "github.com/stretchr/testify/mock" + + service "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +// AdminServiceFetcherExtInterface is an autogenerated mock type for the AdminServiceFetcherExtInterface type +type AdminServiceFetcherExtInterface struct { + mock.Mock +} + +type AdminServiceFetcherExtInterface_AdminServiceClient struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_AdminServiceClient) Return(_a0 service.AdminServiceClient) *AdminServiceFetcherExtInterface_AdminServiceClient { + return &AdminServiceFetcherExtInterface_AdminServiceClient{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminServiceFetcherExtInterface) OnAdminServiceClient() *AdminServiceFetcherExtInterface_AdminServiceClient { + c := _m.On("AdminServiceClient") + return &AdminServiceFetcherExtInterface_AdminServiceClient{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnAdminServiceClientMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_AdminServiceClient { + c := _m.On("AdminServiceClient", matchers...) 
+ return &AdminServiceFetcherExtInterface_AdminServiceClient{Call: c} +} + +// AdminServiceClient provides a mock function with given fields: +func (_m *AdminServiceFetcherExtInterface) AdminServiceClient() service.AdminServiceClient { + ret := _m.Called() + + var r0 service.AdminServiceClient + if rf, ok := ret.Get(0).(func() service.AdminServiceClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(service.AdminServiceClient) + } + } + + return r0 +} + +type AdminServiceFetcherExtInterface_FetchAllVerOfLP struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchAllVerOfLP) Return(_a0 []*admin.LaunchPlan, _a1 error) *AdminServiceFetcherExtInterface_FetchAllVerOfLP { + return &AdminServiceFetcherExtInterface_FetchAllVerOfLP{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchAllVerOfLP(ctx context.Context, lpName string, project string, domain string) *AdminServiceFetcherExtInterface_FetchAllVerOfLP { + c := _m.On("FetchAllVerOfLP", ctx, lpName, project, domain) + return &AdminServiceFetcherExtInterface_FetchAllVerOfLP{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchAllVerOfLPMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchAllVerOfLP { + c := _m.On("FetchAllVerOfLP", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchAllVerOfLP{Call: c} +} + +// FetchAllVerOfLP provides a mock function with given fields: ctx, lpName, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchAllVerOfLP(ctx context.Context, lpName string, project string, domain string) ([]*admin.LaunchPlan, error) { + ret := _m.Called(ctx, lpName, project, domain) + + var r0 []*admin.LaunchPlan + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) []*admin.LaunchPlan); ok { + r0 = rf(ctx, lpName, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*admin.LaunchPlan) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, lpName, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminServiceFetcherExtInterface_FetchAllVerOfTask struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchAllVerOfTask) Return(_a0 []*admin.Task, _a1 error) *AdminServiceFetcherExtInterface_FetchAllVerOfTask { + return &AdminServiceFetcherExtInterface_FetchAllVerOfTask{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchAllVerOfTask(ctx context.Context, name string, project string, domain string) *AdminServiceFetcherExtInterface_FetchAllVerOfTask { + c := _m.On("FetchAllVerOfTask", ctx, name, project, domain) + return &AdminServiceFetcherExtInterface_FetchAllVerOfTask{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchAllVerOfTaskMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchAllVerOfTask { + c := _m.On("FetchAllVerOfTask", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchAllVerOfTask{Call: c} +} + +// FetchAllVerOfTask provides a mock function with given fields: ctx, name, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchAllVerOfTask(ctx context.Context, name string, project string, domain string) ([]*admin.Task, error) { + ret := _m.Called(ctx, name, project, domain) + + var r0 []*admin.Task + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) []*admin.Task); ok { + r0 = rf(ctx, name, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*admin.Task) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, name, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminServiceFetcherExtInterface_FetchExecution struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchExecution) Return(_a0 *admin.Execution, _a1 error) *AdminServiceFetcherExtInterface_FetchExecution { + return &AdminServiceFetcherExtInterface_FetchExecution{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchExecution(ctx context.Context, name string, project string, domain string) *AdminServiceFetcherExtInterface_FetchExecution { + c := _m.On("FetchExecution", ctx, name, project, domain) + return &AdminServiceFetcherExtInterface_FetchExecution{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchExecutionMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchExecution { + c := _m.On("FetchExecution", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchExecution{Call: c} +} + +// FetchExecution provides a mock function with given fields: ctx, name, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchExecution(ctx context.Context, name string, project string, domain string) (*admin.Execution, error) { + ret := _m.Called(ctx, name, project, domain) + + var r0 *admin.Execution + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *admin.Execution); ok { + r0 = rf(ctx, name, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Execution) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, name, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminServiceFetcherExtInterface_FetchLPLatestVersion struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchLPLatestVersion) Return(_a0 *admin.LaunchPlan, _a1 error) *AdminServiceFetcherExtInterface_FetchLPLatestVersion { + return &AdminServiceFetcherExtInterface_FetchLPLatestVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchLPLatestVersion(ctx context.Context, name string, project string, domain string) *AdminServiceFetcherExtInterface_FetchLPLatestVersion { + c := _m.On("FetchLPLatestVersion", ctx, name, project, domain) + return &AdminServiceFetcherExtInterface_FetchLPLatestVersion{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchLPLatestVersionMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchLPLatestVersion { + c := _m.On("FetchLPLatestVersion", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchLPLatestVersion{Call: c} +} + +// FetchLPLatestVersion provides a mock function with given fields: ctx, name, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchLPLatestVersion(ctx context.Context, name string, project string, domain string) (*admin.LaunchPlan, error) { + ret := _m.Called(ctx, name, project, domain) + + var r0 *admin.LaunchPlan + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *admin.LaunchPlan); ok { + r0 = rf(ctx, name, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.LaunchPlan) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, name, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminServiceFetcherExtInterface_FetchLPVersion struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchLPVersion) Return(_a0 *admin.LaunchPlan, _a1 error) *AdminServiceFetcherExtInterface_FetchLPVersion { + return &AdminServiceFetcherExtInterface_FetchLPVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchLPVersion(ctx context.Context, name string, version string, project string, domain string) *AdminServiceFetcherExtInterface_FetchLPVersion { + c := _m.On("FetchLPVersion", ctx, name, version, project, domain) + return &AdminServiceFetcherExtInterface_FetchLPVersion{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchLPVersionMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchLPVersion { + c := _m.On("FetchLPVersion", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchLPVersion{Call: c} +} + +// FetchLPVersion provides a mock function with given fields: ctx, name, version, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchLPVersion(ctx context.Context, name string, version string, project string, domain string) (*admin.LaunchPlan, error) { + ret := _m.Called(ctx, name, version, project, domain) + + var r0 *admin.LaunchPlan + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.LaunchPlan); ok { + r0 = rf(ctx, name, version, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.LaunchPlan) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, name, version, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminServiceFetcherExtInterface_FetchTaskLatestVersion struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchTaskLatestVersion) Return(_a0 *admin.Task, _a1 error) *AdminServiceFetcherExtInterface_FetchTaskLatestVersion { + return &AdminServiceFetcherExtInterface_FetchTaskLatestVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchTaskLatestVersion(ctx context.Context, name string, project string, domain string) *AdminServiceFetcherExtInterface_FetchTaskLatestVersion { + c := _m.On("FetchTaskLatestVersion", ctx, name, project, domain) + return &AdminServiceFetcherExtInterface_FetchTaskLatestVersion{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchTaskLatestVersionMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchTaskLatestVersion { + c := _m.On("FetchTaskLatestVersion", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchTaskLatestVersion{Call: c} +} + +// FetchTaskLatestVersion provides a mock function with given fields: ctx, name, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchTaskLatestVersion(ctx context.Context, name string, project string, domain string) (*admin.Task, error) { + ret := _m.Called(ctx, name, project, domain) + + var r0 *admin.Task + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *admin.Task); ok { + r0 = rf(ctx, name, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Task) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok { + r1 = rf(ctx, name, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AdminServiceFetcherExtInterface_FetchTaskVersion struct { + *mock.Call +} + +func (_m AdminServiceFetcherExtInterface_FetchTaskVersion) Return(_a0 *admin.Task, _a1 error) *AdminServiceFetcherExtInterface_FetchTaskVersion { + return &AdminServiceFetcherExtInterface_FetchTaskVersion{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchTaskVersion(ctx context.Context, name string, version string, project string, domain string) *AdminServiceFetcherExtInterface_FetchTaskVersion { + c := _m.On("FetchTaskVersion", ctx, name, version, project, domain) + return &AdminServiceFetcherExtInterface_FetchTaskVersion{Call: c} +} + +func (_m *AdminServiceFetcherExtInterface) OnFetchTaskVersionMatch(matchers ...interface{}) *AdminServiceFetcherExtInterface_FetchTaskVersion { + c := _m.On("FetchTaskVersion", matchers...) 
+ return &AdminServiceFetcherExtInterface_FetchTaskVersion{Call: c} +} + +// FetchTaskVersion provides a mock function with given fields: ctx, name, version, project, domain +func (_m *AdminServiceFetcherExtInterface) FetchTaskVersion(ctx context.Context, name string, version string, project string, domain string) (*admin.Task, error) { + ret := _m.Called(ctx, name, version, project, domain) + + var r0 *admin.Task + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *admin.Task); ok { + r0 = rf(ctx, name, version, project, domain) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.Task) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, name, version, project, domain) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flytectl/pkg/ext/mocks/admin_updater_ext_interface.go b/flytectl/pkg/ext/mocks/admin_updater_ext_interface.go new file mode 100644 index 0000000000..c1d9bc7e34 --- /dev/null +++ b/flytectl/pkg/ext/mocks/admin_updater_ext_interface.go @@ -0,0 +1,148 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + mock "github.com/stretchr/testify/mock" + + service "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +// AdminUpdaterExtInterface is an autogenerated mock type for the AdminUpdaterExtInterface type +type AdminUpdaterExtInterface struct { + mock.Mock +} + +type AdminUpdaterExtInterface_AdminServiceClient struct { + *mock.Call +} + +func (_m AdminUpdaterExtInterface_AdminServiceClient) Return(_a0 service.AdminServiceClient) *AdminUpdaterExtInterface_AdminServiceClient { + return &AdminUpdaterExtInterface_AdminServiceClient{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminUpdaterExtInterface) OnAdminServiceClient() *AdminUpdaterExtInterface_AdminServiceClient { + c_call := _m.On("AdminServiceClient") + return &AdminUpdaterExtInterface_AdminServiceClient{Call: c_call} +} + +func (_m *AdminUpdaterExtInterface) OnAdminServiceClientMatch(matchers ...interface{}) *AdminUpdaterExtInterface_AdminServiceClient { + c_call := _m.On("AdminServiceClient", matchers...) 
+ return &AdminUpdaterExtInterface_AdminServiceClient{Call: c_call} +} + +// AdminServiceClient provides a mock function with given fields: +func (_m *AdminUpdaterExtInterface) AdminServiceClient() service.AdminServiceClient { + ret := _m.Called() + + var r0 service.AdminServiceClient + if rf, ok := ret.Get(0).(func() service.AdminServiceClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(service.AdminServiceClient) + } + } + + return r0 +} + +type AdminUpdaterExtInterface_UpdateProjectAttributes struct { + *mock.Call +} + +func (_m AdminUpdaterExtInterface_UpdateProjectAttributes) Return(_a0 error) *AdminUpdaterExtInterface_UpdateProjectAttributes { + return &AdminUpdaterExtInterface_UpdateProjectAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminUpdaterExtInterface) OnUpdateProjectAttributes(ctx context.Context, project string, matchingAttr *admin.MatchingAttributes) *AdminUpdaterExtInterface_UpdateProjectAttributes { + c_call := _m.On("UpdateProjectAttributes", ctx, project, matchingAttr) + return &AdminUpdaterExtInterface_UpdateProjectAttributes{Call: c_call} +} + +func (_m *AdminUpdaterExtInterface) OnUpdateProjectAttributesMatch(matchers ...interface{}) *AdminUpdaterExtInterface_UpdateProjectAttributes { + c_call := _m.On("UpdateProjectAttributes", matchers...) 
+ return &AdminUpdaterExtInterface_UpdateProjectAttributes{Call: c_call} +} + +// UpdateProjectAttributes provides a mock function with given fields: ctx, project, matchingAttr +func (_m *AdminUpdaterExtInterface) UpdateProjectAttributes(ctx context.Context, project string, matchingAttr *admin.MatchingAttributes) error { + ret := _m.Called(ctx, project, matchingAttr) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *admin.MatchingAttributes) error); ok { + r0 = rf(ctx, project, matchingAttr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type AdminUpdaterExtInterface_UpdateProjectDomainAttributes struct { + *mock.Call +} + +func (_m AdminUpdaterExtInterface_UpdateProjectDomainAttributes) Return(_a0 error) *AdminUpdaterExtInterface_UpdateProjectDomainAttributes { + return &AdminUpdaterExtInterface_UpdateProjectDomainAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminUpdaterExtInterface) OnUpdateProjectDomainAttributes(ctx context.Context, project string, domain string, matchingAttr *admin.MatchingAttributes) *AdminUpdaterExtInterface_UpdateProjectDomainAttributes { + c_call := _m.On("UpdateProjectDomainAttributes", ctx, project, domain, matchingAttr) + return &AdminUpdaterExtInterface_UpdateProjectDomainAttributes{Call: c_call} +} + +func (_m *AdminUpdaterExtInterface) OnUpdateProjectDomainAttributesMatch(matchers ...interface{}) *AdminUpdaterExtInterface_UpdateProjectDomainAttributes { + c_call := _m.On("UpdateProjectDomainAttributes", matchers...) 
+ return &AdminUpdaterExtInterface_UpdateProjectDomainAttributes{Call: c_call} +} + +// UpdateProjectDomainAttributes provides a mock function with given fields: ctx, project, domain, matchingAttr +func (_m *AdminUpdaterExtInterface) UpdateProjectDomainAttributes(ctx context.Context, project string, domain string, matchingAttr *admin.MatchingAttributes) error { + ret := _m.Called(ctx, project, domain, matchingAttr) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *admin.MatchingAttributes) error); ok { + r0 = rf(ctx, project, domain, matchingAttr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type AdminUpdaterExtInterface_UpdateWorkflowAttributes struct { + *mock.Call +} + +func (_m AdminUpdaterExtInterface_UpdateWorkflowAttributes) Return(_a0 error) *AdminUpdaterExtInterface_UpdateWorkflowAttributes { + return &AdminUpdaterExtInterface_UpdateWorkflowAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *AdminUpdaterExtInterface) OnUpdateWorkflowAttributes(ctx context.Context, project string, domain string, name string, matchingAttr *admin.MatchingAttributes) *AdminUpdaterExtInterface_UpdateWorkflowAttributes { + c_call := _m.On("UpdateWorkflowAttributes", ctx, project, domain, name, matchingAttr) + return &AdminUpdaterExtInterface_UpdateWorkflowAttributes{Call: c_call} +} + +func (_m *AdminUpdaterExtInterface) OnUpdateWorkflowAttributesMatch(matchers ...interface{}) *AdminUpdaterExtInterface_UpdateWorkflowAttributes { + c_call := _m.On("UpdateWorkflowAttributes", matchers...) 
+ return &AdminUpdaterExtInterface_UpdateWorkflowAttributes{Call: c_call} +} + +// UpdateWorkflowAttributes provides a mock function with given fields: ctx, project, domain, name, matchingAttr +func (_m *AdminUpdaterExtInterface) UpdateWorkflowAttributes(ctx context.Context, project string, domain string, name string, matchingAttr *admin.MatchingAttributes) error { + ret := _m.Called(ctx, project, domain, name, matchingAttr) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *admin.MatchingAttributes) error); ok { + r0 = rf(ctx, project, domain, name, matchingAttr) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flytectl/pkg/ext/project_fetcher.go b/flytectl/pkg/ext/project_fetcher.go new file mode 100644 index 0000000000..2bac88296c --- /dev/null +++ b/flytectl/pkg/ext/project_fetcher.go @@ -0,0 +1,46 @@ +package ext + +import ( + "context" + "fmt" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" +) + +func (a *AdminFetcherExtClient) ListProjects(ctx context.Context, filter filters.Filters) (*admin.Projects, error) { + transformFilters, err := filters.BuildProjectListRequest(filter) + if err != nil { + return nil, err + } + e, err := a.AdminServiceClient().ListProjects(ctx, transformFilters) + if err != nil { + return nil, err + } + return e, nil +} + +func (a *AdminFetcherExtClient) GetProjectByID(ctx context.Context, projectID string) (*admin.Project, error) { + if projectID == "" { + return nil, fmt.Errorf("GetProjectByID: projectId is empty") + } + + response, err := a.AdminServiceClient().ListProjects(ctx, &admin.ProjectListRequest{ + Limit: 1, + Filters: fmt.Sprintf("eq(identifier,%s)", filters.EscapeValue(projectID)), + }) + if err != nil { + return nil, err + } + + if len(response.Projects) == 0 { + return nil, NewNotFoundError("project %s", projectID) + } + + if len(response.Projects) > 1 { + panic(fmt.Sprintf("unexpected number of 
projects in ListProjects response: %d - 0 or 1 expected", len(response.Projects))) + } + + return response.Projects[0], nil +} diff --git a/flytectl/pkg/ext/project_fetcher_test.go b/flytectl/pkg/ext/project_fetcher_test.go new file mode 100644 index 0000000000..d89d153aaa --- /dev/null +++ b/flytectl/pkg/ext/project_fetcher_test.go @@ -0,0 +1,45 @@ +package ext + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestAdminFetcherExtClient_ListProjects(t *testing.T) { + + project1 := &admin.Project{ + Id: "flyteexample", + Name: "flyteexample", + Domains: []*admin.Domain{ + { + Id: "development", + Name: "development", + }, + }, + } + + project2 := &admin.Project{ + Id: "flytesnacks", + Name: "flytesnacks", + Domains: []*admin.Domain{ + { + Id: "development", + Name: "development", + }, + }, + } + + adminClient = new(mocks.AdminServiceClient) + adminFetcherExt = AdminFetcherExtClient{AdminClient: adminClient} + + projects := &admin.Projects{ + Projects: []*admin.Project{project1, project2}, + } + adminClient.OnListProjectsMatch(mock.Anything, mock.Anything).Return(projects, nil) + _, err := adminFetcherExt.ListProjects(ctx, taskFilter) + assert.Nil(t, err) +} diff --git a/flytectl/pkg/ext/task_fetcher.go b/flytectl/pkg/ext/task_fetcher.go new file mode 100644 index 0000000000..3240c46214 --- /dev/null +++ b/flytectl/pkg/ext/task_fetcher.go @@ -0,0 +1,55 @@ +package ext + +import ( + "context" + "fmt" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func (a *AdminFetcherExtClient) FetchAllVerOfTask(ctx context.Context, name, project, domain string, filter filters.Filters) ([]*admin.Task, error) { + transformFilters, err := 
filters.BuildResourceListRequestWithName(filter, project, domain, name) + if err != nil { + return nil, err + } + tList, err := a.AdminServiceClient().ListTasks(ctx, transformFilters) + if err != nil { + return nil, err + } + if len(tList.Tasks) == 0 { + return nil, fmt.Errorf("no tasks retrieved for %v", name) + } + return tList.Tasks, nil +} + +func (a *AdminFetcherExtClient) FetchTaskLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.Task, error) { + var t *admin.Task + var err error + // Fetch the latest version of the task. + var taskVersions []*admin.Task + taskVersions, err = a.FetchAllVerOfTask(ctx, name, project, domain, filter) + if err != nil { + return nil, err + } + t = taskVersions[0] + return t, nil +} + +func (a *AdminFetcherExtClient) FetchTaskVersion(ctx context.Context, name, version, project, domain string) (*admin.Task, error) { + t, err := a.AdminServiceClient().GetTask(ctx, &admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: project, + Domain: domain, + Name: name, + Version: version, + }, + }) + if err != nil { + return nil, err + } + return t, nil +} diff --git a/flytectl/pkg/ext/task_fetcher_test.go b/flytectl/pkg/ext/task_fetcher_test.go new file mode 100644 index 0000000000..a1f605272a --- /dev/null +++ b/flytectl/pkg/ext/task_fetcher_test.go @@ -0,0 +1,149 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + adminFetcherExt AdminFetcherExtClient + adminClient *mocks.AdminServiceClient + ctx context.Context + taskListResponse *admin.TaskList + taskFilter = 
filters.Filters{} + task1 *admin.Task +) + +func getTaskFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminFetcherExt = AdminFetcherExtClient{AdminClient: adminClient} + + sortedListLiteralType := core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + } + variableMap := map[string]*core.Variable{ + "sorted_list1": &sortedListLiteralType, + "sorted_list2": &sortedListLiteralType, + } + + task1 := &admin.Task{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + Closure: &admin.TaskClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 0, Nanos: 0}, + CompiledTask: &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + } + + task2 := &admin.Task{ + Id: &core.Identifier{ + Name: "task1", + Version: "v2", + }, + Closure: &admin.TaskClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledTask: &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }, + }, + } + + tasks := []*admin.Task{task2, task1} + + taskListResponse = &admin.TaskList{ + Tasks: tasks, + } +} + +func TestFetchAllVerOfTask(t *testing.T) { + getTaskFetcherSetup() + adminClient.OnListTasksMatch(mock.Anything, mock.Anything).Return(taskListResponse, nil) + _, err := adminFetcherExt.FetchAllVerOfTask(ctx, "taskName", "project", "domain", taskFilter) + assert.Nil(t, err) +} + +func TestFetchTaskVersion(t *testing.T) { + getTaskFetcherSetup() + adminClient.OnGetTaskMatch(mock.Anything, mock.Anything).Return(task1, nil) + _, err := adminFetcherExt.FetchTaskVersion(ctx, "task1", "v1", "project", "domain") + assert.Nil(t, err) +} + +func TestFetchAllVerOfTaskError(t 
*testing.T) { + getTaskFetcherSetup() + adminClient.OnListTasksMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchAllVerOfTask(ctx, "taskName", "project", "domain", taskFilter) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchAllVerOfTaskFilterError(t *testing.T) { + getTaskFetcherSetup() + taskFilter = filters.Filters{ + FieldSelector: "hello=", + } + adminClient.OnListTasksMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchAllVerOfTask(ctx, "taskName", "project", "domain", taskFilter) + assert.NotNil(t, err) +} + +func TestFetchAllVerOfTaskEmptyResponse(t *testing.T) { + taskListResponse := &admin.TaskList{} + getTaskFetcherSetup() + taskFilter = filters.Filters{ + FieldSelector: "", + } + adminClient.OnListTasksMatch(mock.Anything, mock.Anything).Return(taskListResponse, nil) + _, err := adminFetcherExt.FetchAllVerOfTask(ctx, "taskName", "project", "domain", taskFilter) + assert.Equal(t, fmt.Errorf("no tasks retrieved for taskName"), err) +} + +func TestFetchTaskLatestVersion(t *testing.T) { + getTaskFetcherSetup() + adminClient.OnListTasksMatch(mock.Anything, mock.Anything).Return(taskListResponse, nil) + _, err := adminFetcherExt.FetchTaskLatestVersion(ctx, "taskName", "project", "domain", taskFilter) + assert.Nil(t, err) +} + +func TestFetchTaskLatestVersionError(t *testing.T) { + taskListResponse := &admin.TaskList{} + getTaskFetcherSetup() + adminClient.OnListTasksMatch(mock.Anything, mock.Anything).Return(taskListResponse, nil) + _, err := adminFetcherExt.FetchTaskLatestVersion(ctx, "taskName", "project", "domain", taskFilter) + assert.Equal(t, fmt.Errorf("no tasks retrieved for taskName"), err) +} diff --git a/flytectl/pkg/ext/updater.go b/flytectl/pkg/ext/updater.go new file mode 100644 index 0000000000..5121057d5b --- /dev/null +++ b/flytectl/pkg/ext/updater.go @@ -0,0 +1,36 @@ +package ext + +import ( + "context" + + 
"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" +) + +//go:generate mockery -all -case=underscore + +// AdminUpdaterExtInterface Interface for exposing the update capabilities from the admin +type AdminUpdaterExtInterface interface { + AdminServiceClient() service.AdminServiceClient + + // UpdateWorkflowAttributes updates workflow attributes within a project, domain for a particular matchable resource + UpdateWorkflowAttributes(ctx context.Context, project, domain, name string, matchingAttr *admin.MatchingAttributes) error + + // UpdateProjectDomainAttributes updates project domain attributes for a particular matchable resource + UpdateProjectDomainAttributes(ctx context.Context, project, domain string, matchingAttr *admin.MatchingAttributes) error + + // UpdateProjectAttributes updates project attributes for a particular matchable resource + UpdateProjectAttributes(ctx context.Context, project string, matchingAttr *admin.MatchingAttributes) error +} + +// AdminUpdaterExtClient is used for interacting with extended features used for updating data in admin service +type AdminUpdaterExtClient struct { + AdminClient service.AdminServiceClient +} + +func (a *AdminUpdaterExtClient) AdminServiceClient() service.AdminServiceClient { + if a == nil { + return nil + } + return a.AdminClient +} diff --git a/flytectl/pkg/ext/updater_test.go b/flytectl/pkg/ext/updater_test.go new file mode 100644 index 0000000000..cdb21cf603 --- /dev/null +++ b/flytectl/pkg/ext/updater_test.go @@ -0,0 +1,17 @@ +package ext + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/stretchr/testify/assert" +) + +var updaterFetcherClient *AdminUpdaterExtClient + +func TestAdminUpdaterExtClient_AdminServiceClient(t *testing.T) { + adminClient = new(mocks.AdminServiceClient) + updaterFetcherClient = nil + client := updaterFetcherClient.AdminServiceClient() + assert.Nil(t, 
client) +} diff --git a/flytectl/pkg/ext/workflow_fetcher.go b/flytectl/pkg/ext/workflow_fetcher.go new file mode 100644 index 0000000000..7b0bf9d82c --- /dev/null +++ b/flytectl/pkg/ext/workflow_fetcher.go @@ -0,0 +1,70 @@ +package ext + +import ( + "context" + "fmt" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +// FetchAllVerOfWorkflow fetches all the versions for give workflow name +func (a *AdminFetcherExtClient) FetchAllVerOfWorkflow(ctx context.Context, workflowName, project, domain string, filter filters.Filters) ([]*admin.Workflow, error) { + tranformFilters, err := filters.BuildResourceListRequestWithName(filter, project, domain, workflowName) + if err != nil { + return nil, err + } + wList, err := a.AdminServiceClient().ListWorkflows(ctx, tranformFilters) + if err != nil { + return nil, err + } + if len(wList.Workflows) == 0 { + return nil, fmt.Errorf("no workflow retrieved for %v", workflowName) + } + return wList.Workflows, nil +} + +// FetchAllWorkflows fetches all workflows in project domain +func (a *AdminFetcherExtClient) FetchAllWorkflows(ctx context.Context, project, domain string, filter filters.Filters) ([]*admin.NamedEntity, error) { + tranformFilters, err := filters.BuildNamedEntityListRequest(filter, project, domain, core.ResourceType_WORKFLOW) + if err != nil { + return nil, err + } + wList, err := a.AdminServiceClient().ListNamedEntities(ctx, tranformFilters) + if err != nil { + return nil, err + } + if len(wList.Entities) == 0 { + return nil, fmt.Errorf("no workflow retrieved for %v project %v domain", project, domain) + } + return wList.Entities, nil +} + +// FetchWorkflowLatestVersion fetches latest version for given workflow name +func (a *AdminFetcherExtClient) FetchWorkflowLatestVersion(ctx context.Context, name, project, domain string, filter filters.Filters) (*admin.Workflow, error) { + // Fetch the 
latest version of the workflow. + wVersions, err := a.FetchAllVerOfWorkflow(ctx, name, project, domain, filter) + if err != nil { + return nil, err + } + return a.FetchWorkflowVersion(ctx, name, wVersions[0].Id.Version, project, domain) +} + +// FetchWorkflowVersion fetches particular version of workflow +func (a *AdminFetcherExtClient) FetchWorkflowVersion(ctx context.Context, name, version, project, domain string) (*admin.Workflow, error) { + lp, err := a.AdminServiceClient().GetWorkflow(ctx, &admin.ObjectGetRequest{ + Id: &core.Identifier{ + ResourceType: core.ResourceType_WORKFLOW, + Project: project, + Domain: domain, + Name: name, + Version: version, + }, + }) + if err != nil { + return nil, err + } + return lp, nil +} diff --git a/flytectl/pkg/ext/workflow_fetcher_test.go b/flytectl/pkg/ext/workflow_fetcher_test.go new file mode 100644 index 0000000000..b035e71a99 --- /dev/null +++ b/flytectl/pkg/ext/workflow_fetcher_test.go @@ -0,0 +1,162 @@ +package ext + +import ( + "context" + "fmt" + "testing" + + "github.com/flyteorg/flytectl/pkg/filters" + + "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + workflowListResponse *admin.WorkflowList + namedEntityListResponse *admin.NamedEntityList + workflowFilter = filters.Filters{} + workflowResponse *admin.Workflow +) + +func getWorkflowFetcherSetup() { + ctx = context.Background() + adminClient = new(mocks.AdminServiceClient) + adminFetcherExt = AdminFetcherExtClient{AdminClient: adminClient} + + sortedListLiteralType := core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + } + 
variableMap := map[string]*core.Variable{ + "sorted_list1": &sortedListLiteralType, + "sorted_list2": &sortedListLiteralType, + } + + var compiledTasks []*core.CompiledTask + compiledTasks = append(compiledTasks, &core.CompiledTask{ + Template: &core.TaskTemplate{ + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }) + + workflow1 := &admin.Workflow{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Tasks: compiledTasks, + }, + }, + } + workflow2 := &admin.Workflow{ + Id: &core.Identifier{ + Name: "workflow", + Version: "v2", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Tasks: compiledTasks, + }, + }, + } + + namedEntity := &admin.NamedEntity{ + Id: &admin.NamedEntityIdentifier{ + Project: "project", + Domain: "domain", + Name: "workflow", + }, + ResourceType: core.ResourceType_WORKFLOW, + } + + workflows := []*admin.Workflow{workflow2, workflow1} + + namedEntityListResponse = &admin.NamedEntityList{ + Entities: []*admin.NamedEntity{namedEntity}, + } + workflowListResponse = &admin.WorkflowList{ + Workflows: workflows, + } + workflowResponse = workflows[0] +} + +func TestFetchAllWorkflows(t *testing.T) { + t.Run("non empty response", func(t *testing.T) { + getWorkflowFetcherSetup() + adminClient.OnListNamedEntitiesMatch(mock.Anything, mock.Anything).Return(namedEntityListResponse, nil) + _, err := adminFetcherExt.FetchAllWorkflows(ctx, "project", "domain", workflowFilter) + assert.Nil(t, err) + }) + t.Run("empty response", func(t *testing.T) { + getWorkflowFetcherSetup() + namedEntityListResponse := &admin.NamedEntityList{} + adminClient.OnListNamedEntitiesMatch(mock.Anything, mock.Anything).Return(namedEntityListResponse, nil) + _, err := 
adminFetcherExt.FetchAllWorkflows(ctx, "project", "domain", workflowFilter) + assert.Equal(t, fmt.Errorf("no workflow retrieved for project project domain domain"), err) + }) +} + +func TestFetchAllWorkflowsError(t *testing.T) { + getWorkflowFetcherSetup() + adminClient.OnListNamedEntitiesMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchAllWorkflows(ctx, "project", "domain", workflowFilter) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchAllVerOfWorkflow(t *testing.T) { + getWorkflowFetcherSetup() + adminClient.OnListWorkflowsMatch(mock.Anything, mock.Anything).Return(workflowListResponse, nil) + _, err := adminFetcherExt.FetchAllVerOfWorkflow(ctx, "workflowName", "project", "domain", workflowFilter) + assert.Nil(t, err) +} + +func TestFetchAllVerOfWorkflowError(t *testing.T) { + getWorkflowFetcherSetup() + adminClient.OnListWorkflowsMatch(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) + _, err := adminFetcherExt.FetchAllVerOfWorkflow(ctx, "workflowName", "project", "domain", workflowFilter) + assert.Equal(t, fmt.Errorf("failed"), err) +} + +func TestFetchAllVerOfWorkflowEmptyResponse(t *testing.T) { + workflowListResponse := &admin.WorkflowList{} + getWorkflowFetcherSetup() + adminClient.OnListWorkflowsMatch(mock.Anything, mock.Anything).Return(workflowListResponse, nil) + _, err := adminFetcherExt.FetchAllVerOfWorkflow(ctx, "workflowName", "project", "domain", workflowFilter) + assert.Equal(t, fmt.Errorf("no workflow retrieved for workflowName"), err) +} + +func TestFetchWorkflowLatestVersion(t *testing.T) { + getWorkflowFetcherSetup() + adminClient.OnGetWorkflowMatch(mock.Anything, mock.Anything).Return(workflowResponse, nil) + adminClient.OnListWorkflowsMatch(mock.Anything, mock.Anything).Return(workflowListResponse, nil) + _, err := adminFetcherExt.FetchWorkflowLatestVersion(ctx, "workflowName", "project", "domain", workflowFilter) + assert.Nil(t, err) +} + +func 
TestFetchWorkflowLatestVersionError(t *testing.T) { + workflowListResponse := &admin.WorkflowList{} + getWorkflowFetcherSetup() + adminClient.OnListWorkflowsMatch(mock.Anything, mock.Anything).Return(workflowListResponse, nil) + _, err := adminFetcherExt.FetchWorkflowLatestVersion(ctx, "workflowName", "project", "domain", workflowFilter) + assert.Equal(t, fmt.Errorf("no workflow retrieved for workflowName"), err) +} diff --git a/flytectl/pkg/filesystemutils/file_system_utils.go b/flytectl/pkg/filesystemutils/file_system_utils.go new file mode 100644 index 0000000000..77ac4d8eb0 --- /dev/null +++ b/flytectl/pkg/filesystemutils/file_system_utils.go @@ -0,0 +1,70 @@ +package filesystemutils + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" +) + +var osUserHomDirFunc = os.UserHomeDir +var filePathJoinFunc = filepath.Join + +// UserHomeDir Returns the users home directory or on error returns the current dir +func UserHomeDir() string { + if homeDir, err := osUserHomDirFunc(); err == nil { + return homeDir + } + return "." +} + +// FilePathJoin Returns the file path obtained by joining various path elements. +func FilePathJoin(elems ...string) string { + return filePathJoinFunc(elems...) +} + +func ExtractTar(ss io.Reader, destination string) error { + tarReader := tar.NewReader(ss) + + for { + header, err := tarReader.Next() + + if err == io.EOF { + break + } + + if err != nil { + return err + } + + switch header.Typeflag { + case tar.TypeDir: + if err := os.Mkdir(header.Name, 0755); err != nil { + return err + } + case tar.TypeReg: + fmt.Printf("Creating Flyte configuration file at: %s\n", destination) + outFile, err := os.Create(destination) + if err != nil { + return err + } + for { + // Read one 1MB at a time. 
+ if _, err := io.CopyN(outFile, tarReader, 1024*1024); err != nil { + if err == io.EOF { + break + } + return err + } + } + outFile.Close() + + default: + return fmt.Errorf("ExtractTarGz: unknown type: %v in %s", + header.Typeflag, + header.Name) + } + } + return nil +} diff --git a/flytectl/pkg/filesystemutils/file_system_utils_test.go b/flytectl/pkg/filesystemutils/file_system_utils_test.go new file mode 100644 index 0000000000..2bca38d827 --- /dev/null +++ b/flytectl/pkg/filesystemutils/file_system_utils_test.go @@ -0,0 +1,112 @@ +package filesystemutils + +import ( + "archive/tar" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + homeDirVal = "/home/user" + homeDirErr error +) + +func FakeUserHomeDir() (string, error) { + return homeDirVal, homeDirErr +} + +func TestUserHomeDir(t *testing.T) { + t.Run("User home dir", func(t *testing.T) { + osUserHomDirFunc = FakeUserHomeDir + homeDir := UserHomeDir() + assert.Equal(t, homeDirVal, homeDir) + }) + t.Run("User home dir fail", func(t *testing.T) { + homeDirErr = fmt.Errorf("failed to get users home directory") + homeDirVal = "." + osUserHomDirFunc = FakeUserHomeDir + homeDir := UserHomeDir() + assert.Equal(t, ".", homeDir) + // Reset + homeDirErr = nil + homeDirVal = "/home/user" + }) +} + +func TestFilePathJoin(t *testing.T) { + t.Run("File path join", func(t *testing.T) { + homeDir := FilePathJoin("/", "home", "user") + assert.Equal(t, "/home/user", homeDir) + }) +} + +func TestTaring(t *testing.T) { + // Create a fake tar file in tmp. 
+ text := "a: b" + fo, err := os.CreateTemp("", "sampledata") + assert.NoError(t, err) + tarWriter := tar.NewWriter(fo) + err = tarWriter.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: "flyte.yaml", + Size: 4, + Mode: 0640, + ModTime: time.Unix(1245206587, 0), + }) + assert.NoError(t, err) + cnt, err := tarWriter.Write([]byte(text)) + assert.NoError(t, err) + assert.Equal(t, 4, cnt) + tarWriter.Close() + fo.Close() + + t.Run("Basic testing", func(t *testing.T) { + destFile, err := os.CreateTemp("", "sampledata") + assert.NoError(t, err) + reader, err := os.Open(fo.Name()) + assert.NoError(t, err) + err = ExtractTar(reader, destFile.Name()) + assert.NoError(t, err) + fileBytes, err := os.ReadFile(destFile.Name()) + assert.NoError(t, err) + readString := string(fileBytes) + assert.Equal(t, text, readString) + + // Try to extract the file we just extracted again. It's not a tar file obviously so it should error + reader, err = os.Open(destFile.Name()) + assert.NoError(t, err) + err = ExtractTar(reader, destFile.Name()) + assert.Errorf(t, err, "unexpected EOF") + }) +} + +func TestTarBadHeader(t *testing.T) { + // Create a fake tar file in tmp. + fo, err := os.CreateTemp("", "sampledata") + assert.NoError(t, err) + tarWriter := tar.NewWriter(fo) + // Write a symlink, we should not know how to parse. 
+ err = tarWriter.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: "flyte.yaml", + Size: 4, + Mode: 0640, + ModTime: time.Unix(1245206587, 0), + }) + assert.NoError(t, err) + tarWriter.Close() + fo.Close() + + t.Run("Basic testing", func(t *testing.T) { + destFile, err := os.CreateTemp("", "sampledata") + assert.NoError(t, err) + reader, err := os.Open(fo.Name()) + assert.NoError(t, err) + err = ExtractTar(reader, destFile.Name()) + assert.Errorf(t, err, "ExtractTarGz: unknown type") + }) +} diff --git a/flytectl/pkg/filters/filters.go b/flytectl/pkg/filters/filters.go new file mode 100644 index 0000000000..836dc50eba --- /dev/null +++ b/flytectl/pkg/filters/filters.go @@ -0,0 +1,178 @@ +package filters + +import ( + "bytes" + "fmt" + "regexp" + "strings" +) + +var ( + InReg = regexp.MustCompile(` in `) + ContainsReg = regexp.MustCompile(` contains `) + InRegValue = regexp.MustCompile(`(?s)\((.*)\)`) + termOperators = []string{NotEquals, Equals, GreaterThanEquals, GreaterThan, LessThanEquals, LessThan, Contains, In} +) + +// SplitTerms split the filter string and returns the map of strings +func SplitTerms(filter string) []string { + if filter != "" { + return strings.Split(filter, ",") + } + return []string{} +} + +// Transform transform the field selector term from string to flyteadmin field selector syntax +func Transform(filters []string) (string, error) { + adminFilter := "" + for _, f := range filters { + if lhs, op, rhs, ok := parse(f); ok { + unescapedRHS, err := UnescapeValue(rhs) + if err != nil { + return "", err + } + if ok := validate(lhs, rhs); ok { + transformFilter := transform(lhs, op, unescapedRHS) + if len(adminFilter) > 0 { + adminFilter = fmt.Sprintf("%v+%v", adminFilter, transformFilter) + } else { + adminFilter = fmt.Sprintf("%v", transformFilter) + } + } else { + // TODO(Yuvraj): Add filters docs in error + return "", fmt.Errorf("Please add a valid field selector") + } + } + } + return adminFilter, nil +} + +// validate validate 
the field selector operation +func validate(lhs, rhs string) bool { + // TODO Add Validation check with regular expression + if len(lhs) > 0 && len(rhs) > 0 { + return true + } + return false +} + +// InvalidEscapeSequence indicates an error occurred unescaping a field selector +type InvalidEscapeSequence struct { + sequence string +} + +func (i InvalidEscapeSequence) Error() string { + return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence) +} + +// EscapeValue escapes strings to be used as values in filter queries. +func EscapeValue(s string) string { + replacer := strings.NewReplacer( + `\`, `\\`, + `,`, `\,`, + `=`, `\=`, + ) + return replacer.Replace(s) +} + +// UnescapeValue unescapes a fieldSelector value and returns the original literal value. +// May return the original string if it contains no escaped or special characters. +func UnescapeValue(s string) (string, error) { + // if there's no escaping or special characters, just return to avoid allocation + if !strings.ContainsAny(s, `\,=`) { + return s, nil + } + + v := bytes.NewBuffer(make([]byte, 0, len(s))) + inSlash := false + for _, c := range s { + if inSlash { + switch c { + case '\\', ',', '=': + // omit the \ for recognized escape sequences + v.WriteRune(c) + default: + // error on unrecognized escape sequences + return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})} + } + inSlash = false + continue + } + + switch c { + case '\\': + inSlash = true + case ',', '=': + // unescaped , and = characters are not allowed in field selector values + return "", UnescapedRune{r: c} + default: + v.WriteRune(c) + } + } + + // Ending with a single backslash is an invalid sequence + if inSlash { + return "", InvalidEscapeSequence{sequence: "\\"} + } + + return v.String(), nil +} + +// UnescapedRune indicates an error occurred unescaping a field selector +type UnescapedRune struct { + r rune +} + +func (i UnescapedRune) Error() string { + return fmt.Sprintf("invalid field 
selector: unescaped character in value: %v", i.r) +} + +// parse parse the filter string into an operation string and return the lhs,rhs value and operation type +func parse(filter string) (lhs, op, rhs string, ok bool) { + for i := range filter { + remaining := filter[i:] + var results []string + for _, op := range termOperators { + if op == Contains { + if ContainsReg.MatchString(filter) { + results = ContainsReg.Split(filter, 2) + return results[0], op, results[1], true + } + } else if op == In { + if InReg.MatchString(filter) { + results = InReg.Split(filter, 2) + values := InRegValue.FindAllStringSubmatch(strings.TrimSpace(results[1]), -1) + return results[0], op, values[0][1], true + } + } else { + if strings.HasPrefix(remaining, op) { + return filter[0:i], op, filter[i+len(op):], true + } + } + } + } + return "", "", "", false +} + +// transform it transform the field selector operation and return flyteadmin filter syntax +func transform(lhs, op, rhs string) string { + switch op { + case GreaterThanEquals: + return fmt.Sprintf("gte(%v,%v)", lhs, rhs) + case LessThanEquals: + return fmt.Sprintf("lte(%v,%v)", lhs, rhs) + case GreaterThan: + return fmt.Sprintf("gt(%v,%v)", lhs, rhs) + case LessThan: + return fmt.Sprintf("lt(%v,%v)", lhs, rhs) + case Contains: + return fmt.Sprintf("contains(%v,%v)", lhs, rhs) + case NotEquals: + return fmt.Sprintf("ne(%v,%v)", lhs, rhs) + case Equals: + return fmt.Sprintf("eq(%v,%v)", lhs, rhs) + case In: + return fmt.Sprintf("value_in(%v,%v)", lhs, rhs) + } + return "" +} diff --git a/flytectl/pkg/filters/filters_test.go b/flytectl/pkg/filters/filters_test.go new file mode 100644 index 0000000000..43cfb52d3b --- /dev/null +++ b/flytectl/pkg/filters/filters_test.go @@ -0,0 +1,84 @@ +package filters + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestCase struct { + Input string `json:"input"` + Output string `json:"output"` +} + +func TestTransformFilter(t *testing.T) { + tests := []TestCase{ + { + 
Input: "project.name=flytesnacks,execution.duration<200,execution.duration<=200,execution.duration>=200,name contains flyte,name!=flyte", + Output: "eq(project.name,flytesnacks)+lt(execution.duration,200)+lte(execution.duration,200)+gte(execution.duration,200)+contains(name,flyte)+ne(name,flyte)", + }, + { + Input: "execution.phase in (FAILED;SUCCEEDED),execution.name=y8n2wtuspj,execution.duration>200", + Output: "value_in(execution.phase,FAILED;SUCCEEDED)+eq(execution.name,y8n2wtuspj)+gt(execution.duration,200)", + }, + { + Input: `k=\\,,k2=v2`, + Output: "eq(k,\\)+eq(k2,v2)", + }, + } + for _, test := range tests { + filters := SplitTerms(test.Input) + + result, err := Transform(filters) + assert.Nil(t, err) + assert.Equal(t, test.Output, result) + } +} + +func TestTransformFilterError(t *testing.T) { + tests := []TestCase{ + { + Input: `\=\,\`, + Output: "", + }, + { + Input: `foo=bar,baz=blah,complex=\=value\\\,\\`, + Output: "", + }, + } + for _, test := range tests { + filters := SplitTerms(test.Input) + result, err := Transform(filters) + assert.NotNil(t, err) + assert.Equal(t, "", result) + } +} + +func TestParseFailed(t *testing.T) { + tests := []TestCase{ + { + Input: ``, + Output: "", + }, + } + for _, test := range tests { + lhs, op, rhs, ok := parse(test.Input) + result := transform(lhs, op, rhs) + assert.Equal(t, "", result) + assert.Equal(t, false, ok) + assert.Equal(t, "", lhs) + assert.Equal(t, "", rhs) + assert.Equal(t, "", op) + } +} + +func TestEscapeValue(t *testing.T) { + assert.Equal(t, "", EscapeValue("")) + assert.Equal(t, "abc", EscapeValue("abc")) + assert.Equal(t, `\\`, EscapeValue(`\`)) + assert.Equal(t, `\\\\`, EscapeValue(`\\`)) + assert.Equal(t, `\,`, EscapeValue(`,`)) + assert.Equal(t, `\,\,`, EscapeValue(`,,`)) + assert.Equal(t, `\=`, EscapeValue(`=`)) + assert.Equal(t, `\=\=`, EscapeValue(`==`)) +} diff --git a/flytectl/pkg/filters/operator.go b/flytectl/pkg/filters/operator.go new file mode 100644 index 0000000000..37d8c94d2e --- 
/dev/null +++ b/flytectl/pkg/filters/operator.go @@ -0,0 +1,12 @@ +package filters + +const ( + Equals string = "=" + In string = "in" + Contains string = "contains" + NotEquals string = "!=" + GreaterThan string = ">" + GreaterThanEquals string = ">=" + LessThan string = "<" + LessThanEquals string = "<=" +) diff --git a/flytectl/pkg/filters/type.go b/flytectl/pkg/filters/type.go new file mode 100644 index 0000000000..1a46a4fa2c --- /dev/null +++ b/flytectl/pkg/filters/type.go @@ -0,0 +1,19 @@ +package filters + +var ( + DefaultLimit int32 = 100 + DefaultFilter = Filters{ + Limit: DefaultLimit, + Page: 1, + SortBy: "created_at", + Asc: false, + } +) + +type Filters struct { + FieldSelector string `json:"fieldSelector" pflag:",Specifies the Field selector"` + SortBy string `json:"sortBy" pflag:",Specifies which field to sort results "` + Limit int32 `json:"limit" pflag:",Specifies the limit"` + Asc bool `json:"asc" pflag:",Specifies the sorting order. By default flytectl sort result in descending order"` + Page int32 `json:"page" pflag:",Specifies the page number, in case there are multiple pages of results"` +} diff --git a/flytectl/pkg/filters/util.go b/flytectl/pkg/filters/util.go new file mode 100644 index 0000000000..a19481e32d --- /dev/null +++ b/flytectl/pkg/filters/util.go @@ -0,0 +1,86 @@ +package filters + +import ( + "strconv" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func BuildResourceListRequestWithName(c Filters, project, domain, name string) (*admin.ResourceListRequest, error) { + fieldSelector, err := Transform(SplitTerms(c.FieldSelector)) + if err != nil { + return nil, err + } + request := &admin.ResourceListRequest{ + Limit: uint32(c.Limit), + Token: getToken(c), + Filters: fieldSelector, + Id: &admin.NamedEntityIdentifier{ + Project: project, + Domain: domain, + }, + } + if len(name) > 0 { + request.Id.Name = name + } + if sort := buildSortingRequest(c); 
sort != nil { + request.SortBy = sort + } + return request, nil +} + +func BuildNamedEntityListRequest(c Filters, project, domain string, resourceType core.ResourceType) (*admin.NamedEntityListRequest, error) { + fieldSelector, err := Transform(SplitTerms(c.FieldSelector)) + if err != nil { + return nil, err + } + request := &admin.NamedEntityListRequest{ + Limit: uint32(c.Limit), + Token: getToken(c), + Filters: fieldSelector, + Project: project, + Domain: domain, + ResourceType: resourceType, + } + if sort := buildSortingRequest(c); sort != nil { + request.SortBy = sort + } + return request, nil +} + +func BuildProjectListRequest(c Filters) (*admin.ProjectListRequest, error) { + fieldSelector, err := Transform(SplitTerms(c.FieldSelector)) + if err != nil { + return nil, err + } + request := &admin.ProjectListRequest{ + Limit: uint32(c.Limit), + Token: getToken(c), + Filters: fieldSelector, + SortBy: buildSortingRequest(c), + } + return request, nil +} + +func buildSortingRequest(c Filters) *admin.Sort { + sortingOrder := admin.Sort_DESCENDING + if c.Asc { + sortingOrder = admin.Sort_ASCENDING + } + if len(c.SortBy) > 0 { + return &admin.Sort{ + Key: c.SortBy, + Direction: sortingOrder, + } + } + return nil +} + +func getToken(c Filters) string { + token := int(c.Page-1) * int(c.Limit) + if token <= 0 { + return "" + } + return strconv.Itoa(token) +} diff --git a/flytectl/pkg/filters/util_test.go b/flytectl/pkg/filters/util_test.go new file mode 100644 index 0000000000..b1289455a4 --- /dev/null +++ b/flytectl/pkg/filters/util_test.go @@ -0,0 +1,114 @@ +package filters + +import ( + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flytectl/cmd/config" + "github.com/stretchr/testify/assert" +) + +var ( + project = "flytesnack" + domain = "staging" + name = "test" + output = "json" +) + +func TestListRequestWithoutNameFunc(t *testing.T) { + config.GetConfig().Output = output + config.GetConfig().Project = project + 
config.GetConfig().Domain = domain + filter := Filters{ + Limit: 100, + SortBy: "created_at", + Asc: true, + } + request, err := BuildResourceListRequestWithName(filter, project, domain, "") + expectedResponse := &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: project, + Domain: domain, + }, + Limit: 100, + SortBy: &admin.Sort{ + Key: "created_at", + Direction: admin.Sort_ASCENDING, + }, + Filters: "", + } + assert.Nil(t, err) + assert.Equal(t, expectedResponse, request) +} + +func TestProjectListRequestFunc(t *testing.T) { + config.GetConfig().Output = output + config.GetConfig().Project = project + config.GetConfig().Domain = domain + filter := Filters{ + Limit: 100, + Page: 2, + SortBy: "created_at", + } + request, err := BuildProjectListRequest(filter) + expectedResponse := &admin.ProjectListRequest{ + Limit: 100, + Token: "100", + Filters: "", + SortBy: &admin.Sort{ + Key: "created_at", + Direction: admin.Sort_DESCENDING, + }, + } + assert.Nil(t, err) + assert.Equal(t, expectedResponse, request) +} + +func TestProjectListWithRequestFuncError(t *testing.T) { + config.GetConfig().Output = output + config.GetConfig().Project = project + config.GetConfig().Domain = domain + filter := Filters{ + FieldSelector: "Hello=", + Limit: 100, + } + request, err := BuildProjectListRequest(filter) + assert.NotNil(t, err) + assert.Nil(t, request) +} + +func TestListRequestWithNameFunc(t *testing.T) { + config.GetConfig().Output = output + filter := Filters{ + Limit: 100, + SortBy: "created_at", + Page: 1, + } + request, err := BuildResourceListRequestWithName(filter, project, domain, name) + expectedResponse := &admin.ResourceListRequest{ + Id: &admin.NamedEntityIdentifier{ + Project: project, + Domain: domain, + Name: name, + }, + Limit: 100, + SortBy: &admin.Sort{ + Key: "created_at", + Direction: admin.Sort_DESCENDING, + }, + } + assert.Nil(t, err) + assert.Equal(t, expectedResponse, request) +} + +func TestListRequestWithNameFuncError(t 
*testing.T) { + config.GetConfig().Output = output + filter := Filters{ + Limit: 100, + SortBy: "created_at", + FieldSelector: "hello=", + } + request, err := BuildResourceListRequestWithName(filter, project, domain, name) + assert.NotNil(t, err) + assert.Nil(t, request) +} diff --git a/flytectl/pkg/github/githubutil.go b/flytectl/pkg/github/githubutil.go new file mode 100644 index 0000000000..680085370b --- /dev/null +++ b/flytectl/pkg/github/githubutil.go @@ -0,0 +1,245 @@ +package github + +import ( + "context" + "fmt" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/flyteorg/flyte/flytestdlib/logger" + stdlibversion "github.com/flyteorg/flyte/flytestdlib/version" + "github.com/flyteorg/flytectl/pkg/platformutil" + "github.com/flyteorg/flytectl/pkg/util" + + "github.com/google/go-github/v42/github" + "github.com/mouuff/go-rocket-update/pkg/provider" + "github.com/mouuff/go-rocket-update/pkg/updater" + "golang.org/x/oauth2" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +const ( + owner = "flyteorg" + flyte = "flyte" + flytectl = "flytectl" + sandboxSupportedVersion = "v0.10.0" + flytectlRepository = "github.com/flyteorg/flytectl" + commonMessage = "\n A new release of flytectl is available: %s โ†’ %s \n" + brewMessage = "To upgrade, run: brew update && brew upgrade flytectl \n" + linuxMessage = "To upgrade, run: flytectl upgrade \n" + darwinMessage = "To upgrade, run: flytectl upgrade \n" + releaseURL = "https://github.com/flyteorg/flytectl/releases/tag/%s \n" + brewInstallDirectory = "/Cellar/flytectl" +) + +var Client GHRepoService + +// FlytectlReleaseConfig represent the updater config for flytectl binary +var FlytectlReleaseConfig = &updater.Updater{ + Provider: &provider.Github{ + RepositoryURL: flytectlRepository, + ArchiveName: getFlytectlAssetName(), + }, + ExecutableName: flytectl, + Version: stdlibversion.Version, +} + +var ( + arch = platformutil.Arch(runtime.GOARCH) +) + +//go:generate mockery 
-name=GHRepoService -case=underscore + +type GHRepoService interface { + GetLatestRelease(ctx context.Context, owner, repo string) (*github.RepositoryRelease, *github.Response, error) + ListReleases(ctx context.Context, owner, repo string, opts *github.ListOptions) ([]*github.RepositoryRelease, *github.Response, error) + GetReleaseByTag(ctx context.Context, owner, repo, tag string) (*github.RepositoryRelease, *github.Response, error) + GetCommitSHA1(ctx context.Context, owner, repo, ref, lastSHA string) (string, *github.Response, error) +} + +// GetLatestRelease returns the latest non-prerelease version of provided repoName, as +// described in https://docs.github.com/en/rest/reference/releases#get-the-latest-release +func GetLatestRelease(repoName string, g GHRepoService) (*github.RepositoryRelease, error) { + release, _, err := g.GetLatestRelease(context.Background(), owner, repoName) + if err != nil { + return nil, err + } + return release, err +} + +// ListReleases returns the list of release of provided repoName +func ListReleases(repoName string, g GHRepoService) ([]*github.RepositoryRelease, error) { + releases, _, err := g.ListReleases(context.Background(), owner, repoName, &github.ListOptions{ + PerPage: 100, + }) + if err != nil { + return nil, err + } + return releases, err +} + +// GetReleaseByTag returns the provided tag release if tag exist in repository +func GetReleaseByTag(repoName, tag string, g GHRepoService) (*github.RepositoryRelease, error) { + release, _, err := g.GetReleaseByTag(context.Background(), owner, repoName, tag) + if err != nil { + return nil, err + } + return release, err +} + +// GetCommitSHA1 returns sha hash against the version +func GetCommitSHA1(repoName, version string, g GHRepoService) (string, error) { + sha, _, err := g.GetCommitSHA1(context.Background(), owner, repoName, version, "") + if err != nil { + return "", err + } + return sha, err +} + +// GetAssetFromRelease returns the asset using assetName from github release 
with tag +func GetAssetFromRelease(tag, assetName, repoName string, g GHRepoService) (*github.ReleaseAsset, error) { + release, _, err := g.GetReleaseByTag(context.Background(), owner, repoName, tag) + if err != nil { + return nil, err + } + for _, v := range release.Assets { + if v.GetName() == assetName { + return v, nil + } + } + return nil, fmt.Errorf("assest is not found in %s[%s] release", repoName, tag) +} + +// GetSandboxImageSha returns the sha as per input +func GetSandboxImageSha(tag string, pre bool, g GHRepoService) (string, string, error) { + var release *github.RepositoryRelease + if len(tag) == 0 { + releases, err := ListReleases(flyte, g) + if err != nil { + return "", release.GetTagName(), err + } + for _, v := range releases { + // When pre-releases are allowed, simply choose the latest release + if pre { + release = v + break + } else if !*v.Prerelease { + release = v + break + } + } + logger.Infof(context.Background(), "starting with release %s", release.GetTagName()) + } else if len(tag) > 0 { + r, err := GetReleaseByTag(flyte, tag, g) + if err != nil { + return "", r.GetTagName(), err + } + release = r + } + isGreater, err := util.IsVersionGreaterThan(release.GetTagName(), sandboxSupportedVersion) + if err != nil { + return "", release.GetTagName(), err + } + if !isGreater { + return "", release.GetTagName(), fmt.Errorf("version flag only supported with flyte %s+ release", sandboxSupportedVersion) + } + sha, err := GetCommitSHA1(flyte, release.GetTagName(), g) + if err != nil { + return "", release.GetTagName(), err + } + return sha, release.GetTagName(), nil +} + +func getFlytectlAssetName() string { + if arch == platformutil.ArchAmd64 { + arch = platformutil.ArchX86 + } else if arch == platformutil.ArchX86 { + arch = platformutil.Archi386 + } + return fmt.Sprintf("flytectl_%s_%s.tar.gz", cases.Title(language.English).String(runtime.GOOS), arch.String()) +} + +// GetUpgradeMessage return the upgrade message +func GetUpgradeMessage(latest 
string, goos platformutil.Platform) (string, error) {
+	isGreater, err := util.IsVersionGreaterThan(latest, stdlibversion.Version)
+	if err != nil {
+		return "", err
+	}
+
+	if !isGreater {
+		return "", err
+	}
+	message := fmt.Sprintf(commonMessage, stdlibversion.Version, latest)
+
+	symlink, err := CheckBrewInstall(goos)
+	if err != nil {
+		return "", err
+	}
+	if len(symlink) > 0 {
+		message += brewMessage
+	} else if goos == platformutil.Darwin {
+		message += darwinMessage
+	} else if goos == platformutil.Linux {
+		message += linuxMessage
+	}
+	message += fmt.Sprintf(releaseURL, latest)
+
+	return message, nil
+}
+
+// CheckBrewInstall returns the path of symlink if flytectl is installed from brew
+func CheckBrewInstall(goos platformutil.Platform) (string, error) {
+	if goos.String() == platformutil.Darwin.String() {
+		executable, err := FlytectlReleaseConfig.GetExecutable()
+		if err != nil {
+			return executable, err
+		}
+		if symlink, err := filepath.EvalSymlinks(executable); err != nil {
+			return symlink, err
+		} else if len(symlink) > 0 {
+			if strings.Contains(symlink, brewInstallDirectory) {
+				return symlink, nil
+			}
+		}
+	}
+	return "", nil
+}
+
+// GetFullyQualifiedImageName returns the sandbox image, version and error.
+// If no version is specified then the latest release of cr.flyte.org/flyteorg/flyte-sandbox:dind-{SHA} is used,
+// else cr.flyte.org/flyteorg/flyte-sandbox:dind-{SHA}, where sha is derived from the version.
+// If pre-release is true then the latest pre-release of Flyte is used; in that case the user doesn't need to pass a version.
+
+func GetFullyQualifiedImageName(prefix, version, image string, pre bool, g GHRepoService) (string, string, error) {
+	sha, version, err := GetSandboxImageSha(version, pre, g)
+	if err != nil {
+		return "", version, err
+	}
+
+	return fmt.Sprintf("%s:%s", image, fmt.Sprintf("%s-%s", prefix, sha)), version, nil
+}
+
+// GetGHRepoService returns the initialized github repo service client.
+func GetGHRepoService() GHRepoService { + if Client == nil { + var gh *github.Client + if len(os.Getenv("GITHUB_TOKEN")) > 0 { + gh = github.NewClient(oauth2.NewClient(context.Background(), oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")}, + ))) + if _, err := ListReleases(flyte, gh.Repositories); err != nil { + logger.Warnf(context.Background(), "Found GITHUB_TOKEN but failed to fetch releases. Using empty http.Client: %s.", err) + gh = nil + } + } + if gh == nil { + gh = github.NewClient(&http.Client{}) + } + return gh.Repositories + } + return Client +} diff --git a/flytectl/pkg/github/githubutil_test.go b/flytectl/pkg/github/githubutil_test.go new file mode 100644 index 0000000000..41247ac9e3 --- /dev/null +++ b/flytectl/pkg/github/githubutil_test.go @@ -0,0 +1,205 @@ +package github + +import ( + "fmt" + "runtime" + "strings" + "testing" + + stdlibversion "github.com/flyteorg/flyte/flytestdlib/version" + "github.com/flyteorg/flytectl/pkg/github/mocks" + "github.com/flyteorg/flytectl/pkg/platformutil" + "github.com/google/go-github/v42/github" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +var sandboxImageName = "cr.flyte.org/flyteorg/flyte-sandbox" + +func TestGetLatestVersion(t *testing.T) { + t.Run("Get latest release with wrong url", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + mockGh.OnGetLatestReleaseMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("failed")) + _, err := GetLatestRelease("fl", mockGh) + assert.NotNil(t, err) + }) + t.Run("Get latest release", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + mockGh.OnGetLatestReleaseMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, nil) + _, err := GetLatestRelease("flytectl", mockGh) + assert.Nil(t, err) + }) +} + +func TestGetLatestRelease(t *testing.T) { + mockGh := &mocks.GHRepoService{} + tag := "v1.0.0" 
+ mockGh.OnGetLatestReleaseMatch(mock.Anything, mock.Anything, mock.Anything).Return(&github.RepositoryRelease{ + TagName: &tag, + }, nil, nil) + release, err := GetLatestRelease("flyte", mockGh) + assert.Nil(t, err) + assert.Equal(t, true, strings.HasPrefix(release.GetTagName(), "v")) +} + +func TestCheckVersionExist(t *testing.T) { + t.Run("Invalid Tag", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("failed")) + _, err := GetReleaseByTag("v100.0.0", "flyte", mockGh) + assert.NotNil(t, err) + }) + t.Run("Valid Tag", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + tag := "v1.0.0" + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&github.RepositoryRelease{ + TagName: &tag, + }, nil, nil) + release, err := GetReleaseByTag(tag, "flyte", mockGh) + assert.Nil(t, err) + assert.Equal(t, true, strings.HasPrefix(release.GetTagName(), "v")) + }) +} + +func TestGetFullyQualifiedImageName(t *testing.T) { + t.Run("Get tFully Qualified Image Name ", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + tag := "v0.15.0" + isPreRelease := false + releases := []*github.RepositoryRelease{{ + TagName: &tag, + Prerelease: &isPreRelease, + }} + mockGh.OnListReleasesMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(releases, nil, nil) + mockGh.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(sandboxImageName, nil, nil) + image, tag, err := GetFullyQualifiedImageName("dind", "", sandboxImageName, false, mockGh) + assert.Nil(t, err) + assert.Equal(t, true, strings.HasPrefix(tag, "v")) + assert.Equal(t, true, strings.HasPrefix(image, sandboxImageName)) + }) + t.Run("Get Fully Qualified Image Name with pre release", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + tag := "v0.15.0-pre" + isPreRelease := true + 
releases := []*github.RepositoryRelease{{ + TagName: &tag, + Prerelease: &isPreRelease, + }} + mockGh.OnListReleasesMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(releases, nil, nil) + mockGh.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(sandboxImageName, nil, nil) + image, tag, err := GetFullyQualifiedImageName("dind", "", sandboxImageName, isPreRelease, mockGh) + assert.Nil(t, err) + assert.Equal(t, true, strings.HasPrefix(tag, "v")) + assert.Equal(t, true, strings.HasPrefix(image, sandboxImageName)) + }) + t.Run("Get Fully Qualified Image Name with specific version", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + tag := "v0.19.0" + isPreRelease := true + release := &github.RepositoryRelease{ + TagName: &tag, + Prerelease: &isPreRelease, + } + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(release, nil, nil) + mockGh.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(sandboxImageName, nil, nil) + image, tag, err := GetFullyQualifiedImageName("dind", "v0.19.0", sandboxImageName, isPreRelease, mockGh) + assert.Nil(t, err) + assert.Equal(t, "v0.19.0", tag) + assert.Equal(t, true, strings.HasPrefix(image, sandboxImageName)) + }) +} + +func TestGetSHAFromVersion(t *testing.T) { + t.Run("Invalid Tag", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + mockGh.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("", nil, fmt.Errorf("failed")) + _, err := GetCommitSHA1("v100.0.0", "flyte", mockGh) + assert.NotNil(t, err) + }) + t.Run("Valid Tag", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + mockGh.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("v1.15.0", nil, nil) + release, err := GetCommitSHA1("v0.15.0", "flyte", mockGh) + assert.Nil(t, err) + 
assert.Greater(t, len(release), 0) + }) +} + +func TestGetAssetsFromRelease(t *testing.T) { + t.Run("Successful get assets", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + tag := "v0.15.0" + sandboxManifest := "flyte_sandbox_manifest.yaml" + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&github.RepositoryRelease{ + TagName: &tag, + Assets: []*github.ReleaseAsset{{ + Name: &sandboxManifest, + }, + }, + }, nil, nil) + assets, err := GetAssetFromRelease(tag, sandboxManifest, flyte, mockGh) + assert.Nil(t, err) + assert.NotNil(t, assets) + assert.Equal(t, sandboxManifest, *assets.Name) + }) + + t.Run("Failed get assets with wrong name", func(t *testing.T) { + mockGh := &mocks.GHRepoService{} + mockGh.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, fmt.Errorf("failed")) + assets, err := GetAssetFromRelease("v0.15.0", "test", flyte, mockGh) + assert.NotNil(t, err) + assert.Nil(t, assets) + }) +} + +func TestGetAssetsName(t *testing.T) { + t.Run("Get Assets name", func(t *testing.T) { + expected := fmt.Sprintf("flytectl_%s_386.tar.gz", cases.Title(language.English).String(runtime.GOOS)) + arch = platformutil.Arch386 + assert.Equal(t, expected, getFlytectlAssetName()) + }) +} + +func TestCheckBrewInstall(t *testing.T) { + symlink, err := CheckBrewInstall(platformutil.Darwin) + assert.Nil(t, err) + assert.Equal(t, len(symlink), 0) + symlink, err = CheckBrewInstall(platformutil.Linux) + assert.Nil(t, err) + assert.Equal(t, 0, len(symlink)) +} + +func TestGetUpgradeMessage(t *testing.T) { + var darwin = platformutil.Darwin + var linux = platformutil.Linux + var windows = platformutil.Linux + + var version = "v0.2.20" + stdlibversion.Version = "v0.2.10" + message, err := GetUpgradeMessage(version, darwin) + assert.Nil(t, err) + assert.Equal(t, 157, len(message)) + + version = "v0.2.09" + message, err = GetUpgradeMessage(version, darwin) + assert.Nil(t, err) + 
assert.Equal(t, 0, len(message)) + + version = "v" + message, err = GetUpgradeMessage(version, darwin) + assert.NotNil(t, err) + assert.Equal(t, 0, len(message)) + + version = "v0.2.20" + message, err = GetUpgradeMessage(version, windows) + assert.Nil(t, err) + assert.Equal(t, 157, len(message)) + + version = "v0.2.20" + message, err = GetUpgradeMessage(version, linux) + assert.Nil(t, err) + assert.Equal(t, 157, len(message)) +} diff --git a/flytectl/pkg/github/mocks/gh_repo_service.go b/flytectl/pkg/github/mocks/gh_repo_service.go new file mode 100644 index 0000000000..fd3f932152 --- /dev/null +++ b/flytectl/pkg/github/mocks/gh_repo_service.go @@ -0,0 +1,213 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + github "github.com/google/go-github/v42/github" + mock "github.com/stretchr/testify/mock" +) + +// GHRepoService is an autogenerated mock type for the GHRepoService type +type GHRepoService struct { + mock.Mock +} + +type GHRepoService_GetCommitSHA1 struct { + *mock.Call +} + +func (_m GHRepoService_GetCommitSHA1) Return(_a0 string, _a1 *github.Response, _a2 error) *GHRepoService_GetCommitSHA1 { + return &GHRepoService_GetCommitSHA1{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *GHRepoService) OnGetCommitSHA1(ctx context.Context, owner string, repo string, ref string, lastSHA string) *GHRepoService_GetCommitSHA1 { + c_call := _m.On("GetCommitSHA1", ctx, owner, repo, ref, lastSHA) + return &GHRepoService_GetCommitSHA1{Call: c_call} +} + +func (_m *GHRepoService) OnGetCommitSHA1Match(matchers ...interface{}) *GHRepoService_GetCommitSHA1 { + c_call := _m.On("GetCommitSHA1", matchers...) 
+ return &GHRepoService_GetCommitSHA1{Call: c_call} +} + +// GetCommitSHA1 provides a mock function with given fields: ctx, owner, repo, ref, lastSHA +func (_m *GHRepoService) GetCommitSHA1(ctx context.Context, owner string, repo string, ref string, lastSHA string) (string, *github.Response, error) { + ret := _m.Called(ctx, owner, repo, ref, lastSHA) + + var r0 string + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) string); ok { + r0 = rf(ctx, owner, repo, ref, lastSHA) + } else { + r0 = ret.Get(0).(string) + } + + var r1 *github.Response + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) *github.Response); ok { + r1 = rf(ctx, owner, repo, ref, lastSHA) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*github.Response) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, string, string, string, string) error); ok { + r2 = rf(ctx, owner, repo, ref, lastSHA) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type GHRepoService_GetLatestRelease struct { + *mock.Call +} + +func (_m GHRepoService_GetLatestRelease) Return(_a0 *github.RepositoryRelease, _a1 *github.Response, _a2 error) *GHRepoService_GetLatestRelease { + return &GHRepoService_GetLatestRelease{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *GHRepoService) OnGetLatestRelease(ctx context.Context, owner string, repo string) *GHRepoService_GetLatestRelease { + c_call := _m.On("GetLatestRelease", ctx, owner, repo) + return &GHRepoService_GetLatestRelease{Call: c_call} +} + +func (_m *GHRepoService) OnGetLatestReleaseMatch(matchers ...interface{}) *GHRepoService_GetLatestRelease { + c_call := _m.On("GetLatestRelease", matchers...) 
+ return &GHRepoService_GetLatestRelease{Call: c_call} +} + +// GetLatestRelease provides a mock function with given fields: ctx, owner, repo +func (_m *GHRepoService) GetLatestRelease(ctx context.Context, owner string, repo string) (*github.RepositoryRelease, *github.Response, error) { + ret := _m.Called(ctx, owner, repo) + + var r0 *github.RepositoryRelease + if rf, ok := ret.Get(0).(func(context.Context, string, string) *github.RepositoryRelease); ok { + r0 = rf(ctx, owner, repo) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*github.RepositoryRelease) + } + } + + var r1 *github.Response + if rf, ok := ret.Get(1).(func(context.Context, string, string) *github.Response); ok { + r1 = rf(ctx, owner, repo) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*github.Response) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok { + r2 = rf(ctx, owner, repo) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type GHRepoService_GetReleaseByTag struct { + *mock.Call +} + +func (_m GHRepoService_GetReleaseByTag) Return(_a0 *github.RepositoryRelease, _a1 *github.Response, _a2 error) *GHRepoService_GetReleaseByTag { + return &GHRepoService_GetReleaseByTag{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *GHRepoService) OnGetReleaseByTag(ctx context.Context, owner string, repo string, tag string) *GHRepoService_GetReleaseByTag { + c_call := _m.On("GetReleaseByTag", ctx, owner, repo, tag) + return &GHRepoService_GetReleaseByTag{Call: c_call} +} + +func (_m *GHRepoService) OnGetReleaseByTagMatch(matchers ...interface{}) *GHRepoService_GetReleaseByTag { + c_call := _m.On("GetReleaseByTag", matchers...) 
+ return &GHRepoService_GetReleaseByTag{Call: c_call} +} + +// GetReleaseByTag provides a mock function with given fields: ctx, owner, repo, tag +func (_m *GHRepoService) GetReleaseByTag(ctx context.Context, owner string, repo string, tag string) (*github.RepositoryRelease, *github.Response, error) { + ret := _m.Called(ctx, owner, repo, tag) + + var r0 *github.RepositoryRelease + if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *github.RepositoryRelease); ok { + r0 = rf(ctx, owner, repo, tag) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*github.RepositoryRelease) + } + } + + var r1 *github.Response + if rf, ok := ret.Get(1).(func(context.Context, string, string, string) *github.Response); ok { + r1 = rf(ctx, owner, repo, tag) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*github.Response) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, string, string, string) error); ok { + r2 = rf(ctx, owner, repo, tag) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type GHRepoService_ListReleases struct { + *mock.Call +} + +func (_m GHRepoService_ListReleases) Return(_a0 []*github.RepositoryRelease, _a1 *github.Response, _a2 error) *GHRepoService_ListReleases { + return &GHRepoService_ListReleases{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *GHRepoService) OnListReleases(ctx context.Context, owner string, repo string, opts *github.ListOptions) *GHRepoService_ListReleases { + c_call := _m.On("ListReleases", ctx, owner, repo, opts) + return &GHRepoService_ListReleases{Call: c_call} +} + +func (_m *GHRepoService) OnListReleasesMatch(matchers ...interface{}) *GHRepoService_ListReleases { + c_call := _m.On("ListReleases", matchers...) 
+ return &GHRepoService_ListReleases{Call: c_call} +} + +// ListReleases provides a mock function with given fields: ctx, owner, repo, opts +func (_m *GHRepoService) ListReleases(ctx context.Context, owner string, repo string, opts *github.ListOptions) ([]*github.RepositoryRelease, *github.Response, error) { + ret := _m.Called(ctx, owner, repo, opts) + + var r0 []*github.RepositoryRelease + if rf, ok := ret.Get(0).(func(context.Context, string, string, *github.ListOptions) []*github.RepositoryRelease); ok { + r0 = rf(ctx, owner, repo, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*github.RepositoryRelease) + } + } + + var r1 *github.Response + if rf, ok := ret.Get(1).(func(context.Context, string, string, *github.ListOptions) *github.Response); ok { + r1 = rf(ctx, owner, repo, opts) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*github.Response) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, string, string, *github.ListOptions) error); ok { + r2 = rf(ctx, owner, repo, opts) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} diff --git a/flytectl/pkg/k8s/k8s.go b/flytectl/pkg/k8s/k8s.go new file mode 100644 index 0000000000..f185e53d2e --- /dev/null +++ b/flytectl/pkg/k8s/k8s.go @@ -0,0 +1,133 @@ +package k8s + +import ( + "fmt" + "os" + + "github.com/enescakir/emoji" + "github.com/pkg/errors" + "k8s.io/client-go/kubernetes" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type K8s interface { + CoreV1() corev1.CoreV1Interface +} + +//go:generate mockery -name=ContextOps -case=underscore +type ContextOps interface { + CheckConfig() error + CopyContext(srcConfigAccess clientcmd.ConfigAccess, srcCtxName, targetCtxName, targetNamespace string) error + RemoveContext(ctxName string) error +} + +// ContextManager context manager implementing ContextOps +type ContextManager struct { + configAccess 
clientcmd.ConfigAccess +} + +func NewK8sContextManager() ContextOps { + if ContextMgr != nil { + return ContextMgr + } + ContextMgr = &ContextManager{ + configAccess: clientcmd.NewDefaultPathOptions(), + } + return ContextMgr +} + +var Client K8s +var ContextMgr ContextOps + +// GetK8sClient return the k8s client from sandbox kubeconfig +func GetK8sClient(cfg, master string) (K8s, error) { + kubeConfigPath := os.ExpandEnv(cfg) + kubecfg, err := clientcmd.BuildConfigFromFlags(master, kubeConfigPath) + if err != nil { + return nil, errors.Wrapf(err, "Error building kubeconfig") + } + if Client == nil { + kubeClient, err := kubernetes.NewForConfig(kubecfg) + if err != nil { + return nil, errors.Wrapf(err, "Error building kubernetes clientset") + } + return kubeClient, nil + } + return Client, nil +} + +// CheckConfig checks if the kubeConfig pointed to by configAccess exists +func (k *ContextManager) CheckConfig() error { + _, err := k.configAccess.GetStartingConfig() + return err +} + +// CopyContext copies context srcCtxName part of srcConfigAccess to targetCtxName part of targetConfigAccess. +func (k *ContextManager) CopyContext(srcConfigAccess clientcmd.ConfigAccess, srcCtxName, targetCtxName, targetNamespace string) error { + err := k.CheckConfig() + if err != nil { + return err + } + + fromStartingConfig, err := srcConfigAccess.GetStartingConfig() + if err != nil { + return err + } + _, exists := fromStartingConfig.Contexts[srcCtxName] + if !exists { + return fmt.Errorf("context %v doesn't exist", srcCtxName) + } + + toStartingConfig, err := k.configAccess.GetStartingConfig() + if err != nil { + return err + } + + _, exists = toStartingConfig.Contexts[targetCtxName] + if exists { + fmt.Printf("%v Context %q already exists. 
Overwriting it!\n", emoji.FactoryWorker, targetCtxName)
+	} else {
+		toStartingConfig.Contexts[targetCtxName] = clientcmdapi.NewContext()
+	}
+
+	toStartingConfig.Clusters[targetCtxName] = fromStartingConfig.Clusters[srcCtxName]
+	toStartingConfig.Clusters[targetCtxName].LocationOfOrigin = k.configAccess.GetDefaultFilename()
+	toStartingConfig.AuthInfos[targetCtxName] = fromStartingConfig.AuthInfos[srcCtxName]
+	toStartingConfig.AuthInfos[targetCtxName].LocationOfOrigin = k.configAccess.GetDefaultFilename()
+	toStartingConfig.Contexts[targetCtxName].Cluster = targetCtxName
+	toStartingConfig.Contexts[targetCtxName].AuthInfo = targetCtxName
+	toStartingConfig.Contexts[targetCtxName].Namespace = targetNamespace
+	toStartingConfig.CurrentContext = targetCtxName
+	if err := clientcmd.ModifyConfig(k.configAccess, *toStartingConfig, true); err != nil {
+		return err
+	}
+
+	fmt.Printf("%v Activated context %q!\n", emoji.FactoryWorker, targetCtxName)
+	return nil
+}
+
+// RemoveContext removes the context named ctxName from the kubeconfig pointed to by k.configAccess
+func (k *ContextManager) RemoveContext(ctxName string) error {
+	fromStartingConfig, err := k.configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+	_, exists := fromStartingConfig.Contexts[ctxName]
+	if !exists {
+		return fmt.Errorf("context %v doesn't exist", ctxName)
+	}
+
+	delete(fromStartingConfig.Clusters, ctxName)
+	delete(fromStartingConfig.AuthInfos, ctxName)
+	delete(fromStartingConfig.Contexts, ctxName)
+	fromStartingConfig.CurrentContext = ""
+
+	if err := clientcmd.ModifyConfig(k.configAccess, *fromStartingConfig, true); err != nil {
+		return err
+	}
+
+	fmt.Printf("context removed for %q.\n", ctxName)
+	return nil
+}
diff --git a/flytectl/pkg/k8s/k8s_test.go b/flytectl/pkg/k8s/k8s_test.go
new file mode 100644
index 0000000000..84dc16923c
--- /dev/null
+++ b/flytectl/pkg/k8s/k8s_test.go
@@ -0,0 +1,65 @@
+package k8s
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	
"github.com/stretchr/testify/assert"
+ testclient "k8s.io/client-go/kubernetes/fake"
+)
+
+func TestGetK8sClient(t *testing.T) {
+ content := `
+apiVersion: v1
+clusters:
+- cluster:
+ server: https://localhost:8080
+ extensions:
+ - name: client.authentication.k8s.io/exec
+ extension:
+ audience: foo
+ other: bar
+ name: foo-cluster
+contexts:
+- context:
+ cluster: foo-cluster
+ user: foo-user
+ namespace: bar
+ name: foo-context
+current-context: foo-context
+kind: Config
+users:
+- name: foo-user
+ user:
+ exec:
+ apiVersion: client.authentication.k8s.io/v1beta1
+ args:
+ - arg-1
+ - arg-2
+ command: foo-command
+ provideClusterInfo: true
+`
+ tmpfile, err := ioutil.TempFile("", "kubeconfig")
+ if err != nil {
+ t.Error(err)
+ }
+ defer os.Remove(tmpfile.Name())
+ if err := ioutil.WriteFile(tmpfile.Name(), []byte(content), os.ModePerm); err != nil {
+ t.Error(err)
+ }
+ t.Run("Return pre-set global client", func(t *testing.T) {
+ client := testclient.NewSimpleClientset()
+ Client = client
+ c, err := GetK8sClient(tmpfile.Name(), "https://localhost:8080")
+ assert.Nil(t, err)
+ assert.NotNil(t, c)
+ })
+ t.Run("Create client from config", func(t *testing.T) {
+ Client = nil
+ client, err := GetK8sClient(tmpfile.Name(), "https://localhost:8080")
+ assert.Nil(t, err)
+ assert.NotNil(t, client)
+ })
+
+}
diff --git a/flytectl/pkg/k8s/mocks/context_ops.go b/flytectl/pkg/k8s/mocks/context_ops.go
new file mode 100644
index 0000000000..74bd6c7587
--- /dev/null
+++ b/flytectl/pkg/k8s/mocks/context_ops.go
@@ -0,0 +1,110 @@
+// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + clientcmd "k8s.io/client-go/tools/clientcmd" + + mock "github.com/stretchr/testify/mock" +) + +// ContextOps is an autogenerated mock type for the ContextOps type +type ContextOps struct { + mock.Mock +} + +type ContextOps_CheckConfig struct { + *mock.Call +} + +func (_m ContextOps_CheckConfig) Return(_a0 error) *ContextOps_CheckConfig { + return &ContextOps_CheckConfig{Call: _m.Call.Return(_a0)} +} + +func (_m *ContextOps) OnCheckConfig() *ContextOps_CheckConfig { + c_call := _m.On("CheckConfig") + return &ContextOps_CheckConfig{Call: c_call} +} + +func (_m *ContextOps) OnCheckConfigMatch(matchers ...interface{}) *ContextOps_CheckConfig { + c_call := _m.On("CheckConfig", matchers...) + return &ContextOps_CheckConfig{Call: c_call} +} + +// CheckConfig provides a mock function with given fields: +func (_m *ContextOps) CheckConfig() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type ContextOps_CopyContext struct { + *mock.Call +} + +func (_m ContextOps_CopyContext) Return(_a0 error) *ContextOps_CopyContext { + return &ContextOps_CopyContext{Call: _m.Call.Return(_a0)} +} + +func (_m *ContextOps) OnCopyContext(srcConfigAccess clientcmd.ConfigAccess, srcCtxName string, targetCtxName string, targetNamespace string) *ContextOps_CopyContext { + c_call := _m.On("CopyContext", srcConfigAccess, srcCtxName, targetCtxName, targetNamespace) + return &ContextOps_CopyContext{Call: c_call} +} + +func (_m *ContextOps) OnCopyContextMatch(matchers ...interface{}) *ContextOps_CopyContext { + c_call := _m.On("CopyContext", matchers...) 
+ return &ContextOps_CopyContext{Call: c_call} +} + +// CopyContext provides a mock function with given fields: srcConfigAccess, srcCtxName, targetCtxName, targetNamespace +func (_m *ContextOps) CopyContext(srcConfigAccess clientcmd.ConfigAccess, srcCtxName string, targetCtxName string, targetNamespace string) error { + ret := _m.Called(srcConfigAccess, srcCtxName, targetCtxName, targetNamespace) + + var r0 error + if rf, ok := ret.Get(0).(func(clientcmd.ConfigAccess, string, string, string) error); ok { + r0 = rf(srcConfigAccess, srcCtxName, targetCtxName, targetNamespace) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type ContextOps_RemoveContext struct { + *mock.Call +} + +func (_m ContextOps_RemoveContext) Return(_a0 error) *ContextOps_RemoveContext { + return &ContextOps_RemoveContext{Call: _m.Call.Return(_a0)} +} + +func (_m *ContextOps) OnRemoveContext(ctxName string) *ContextOps_RemoveContext { + c_call := _m.On("RemoveContext", ctxName) + return &ContextOps_RemoveContext{Call: c_call} +} + +func (_m *ContextOps) OnRemoveContextMatch(matchers ...interface{}) *ContextOps_RemoveContext { + c_call := _m.On("RemoveContext", matchers...) 
+ return &ContextOps_RemoveContext{Call: c_call} +} + +// RemoveContext provides a mock function with given fields: ctxName +func (_m *ContextOps) RemoveContext(ctxName string) error { + ret := _m.Called(ctxName) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(ctxName) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flytectl/pkg/pkce/testdata/empty_access_token.json b/flytectl/pkg/pkce/testdata/empty_access_token.json new file mode 100644 index 0000000000..474f4762e0 --- /dev/null +++ b/flytectl/pkg/pkce/testdata/empty_access_token.json @@ -0,0 +1,6 @@ +{ + "access_token":"", + "token_type":"bearer", + "refresh_token":"eyJhbGciOiJSUzI1NiIsImtleV9pZCI6IjlLZlNILXphZjRjY1dmTlNPbm91YmZUbnItVW5kMHVuY3ctWF9KNUJVdWciLCJ0eXAiOiJKV1QifQ.eyJhdWQiOlsiaHR0cHM6Ly9kZW1vLm51Y2x5ZGUuaW8iXSwiY2xpZW50X2lkIjoiZmx5dGVjdGwiLCJleHAiOjE2MTk1MzM1MjcsImZvcm0iOnsiY29kZV9jaGFsbGVuZ2UiOiJ2bWNxazArZnJRS3Vvb2FMUHZwUDJCeUtod2VKR2VaeG1mdGtkMml0T042Tk13SVBQNWwySmNpWDd3NTdlaS9iVW1LTWhPSjJVUERnK0F5RXRaTG94SFJiMDl1cWRKSSIsImNvZGVfY2hhbGxlbmdlX21ldGhvZCI6IlN2WEgyeDh2UDUrSkJxQ0NjT2dCL0hNWjdLSmE3bkdLMDBaUVA0ekd4WGcifSwiaWF0IjoxNjE5NTAyNTM1LCJpc3MiOiJodHRwczovL2RlbW8ubnVjbHlkZS5pbyIsImp0aSI6IjQzMTM1ZWY2LTA5NjEtNGFlZC1hOTYxLWQyZGI1YWJmM2U1YyIsInNjcCI6WyJvZmZsaW5lIiwiZi5hbGwiLCJhY2Nlc3NfdG9rZW4iXSwic3ViIjoiMTE0NTI3ODE1MzA1MTI4OTc0NDcwIiwidXNlcl9pbmZvIjp7ImZhbWlseV9uYW1lIjoiTWFoaW5kcmFrYXIiLCJnaXZlbl9uYW1lIjoiUHJhZnVsbGEiLCJuYW1lIjoiUHJhZnVsbGEgTWFoaW5kcmFrYXIiLCJwaWN0dXJlIjoiaHR0cHM6Ly9saDMuZ29vZ2xldXNlcmNvbnRlbnQuY29tL2EtL0FPaDE0R2p1VDFrOC04YTV2QkdPSUYxYURnaFltRng4aEQ5S05pUjVqblp1PXM5Ni1jIiwic3ViamVjdCI6IjExNDUyNzgxNTMwNTEyODk3NDQ3MCJ9fQ.YKom5-gE4e84rJJIfxcpbMzgjZT33UZ27UTa1y8pK2BAWaPjIZtwudwDHQ5Rd3m0mJJWhBp0j0e8h9DvzBUdpsnGMXSCYKP-ag9y9k5OW59FMm9RqIakWHtj6NPnxGO1jAsaNCYePj8knR7pBLCLCse2taDHUJ8RU1F0DeHNr2y-JupgG5y1vjBcb-9eD8OwOSTp686_hm7XoJlxiKx8dj2O7HPH7M2pAHA_0bVrKKj7Y_s3fRhkm_Aq6LRdA-IiTl9xJQxgVUreejls9-RR9mSTKj6A81-Isz3qAUttVVaA4OT5OdW879_yT7OSLw_QwpXzNZ
7qOR7OIpmL_xZXig", + "expiry":"2021-04-27T19:55:26.658635+05:30" +} \ No newline at end of file diff --git a/flytectl/pkg/pkce/testdata/token.json b/flytectl/pkg/pkce/testdata/token.json new file mode 100644 index 0000000000..721cecc5f6 --- /dev/null +++ b/flytectl/pkg/pkce/testdata/token.json @@ -0,0 +1,6 @@ +{ + "access_token":"eyJhbGciOiJSUzI1NiIsImtleV9pZCI6IjlLZlNILXphZjRjY1dmTlNPbm91YmZUbnItVW5kMHVuY3ctWF9KNUJVdWciLCJ0eXAiOiJKV1QifQ.eyJhdWQiOlsiaHR0cHM6Ly9kZW1vLm51Y2x5ZGUuaW8iXSwiY2xpZW50X2lkIjoiZmx5dGVjdGwiLCJleHAiOjE2MTk1Mjk5MjcsImZvcm0iOnsiY29kZV9jaGFsbGVuZ2UiOiJ2bWNxazArZnJRS3Vvb2FMUHZwUDJCeUtod2VKR2VaeG1mdGtkMml0T042Tk13SVBQNWwySmNpWDd3NTdlaS9iVW1LTWhPSjJVUERnK0F5RXRaTG94SFJiMDl1cWRKSSIsImNvZGVfY2hhbGxlbmdlX21ldGhvZCI6IlN2WEgyeDh2UDUrSkJxQ0NjT2dCL0hNWjdLSmE3bkdLMDBaUVA0ekd4WGcifSwiaWF0IjoxNjE5NTAyNTM1LCJpc3MiOiJodHRwczovL2RlbW8ubnVjbHlkZS5pbyIsImp0aSI6IjQzMTM1ZWY2LTA5NjEtNGFlZC1hOTYxLWQyZGI1YWJmM2U1YyIsInNjcCI6WyJvZmZsaW5lIiwiYWxsIiwiYWNjZXNzX3Rva2VuIl0sInN1YiI6IjExNDUyNzgxNTMwNTEyODk3NDQ3MCIsInVzZXJfaW5mbyI6eyJmYW1pbHlfbmFtZSI6Ik1haGluZHJha2FyIiwiZ2l2ZW5fbmFtZSI6IlByYWZ1bGxhIiwibmFtZSI6IlByYWZ1bGxhIE1haGluZHJha2FyIiwicGljdHVyZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS9hLS9BT2gxNEdqdVQxazgtOGE1dkJHT0lGMWFEZ2hZbUZ4OGhEOUtOaVI1am5adT1zOTYtYyIsInN1YmplY3QiOiIxMTQ1Mjc4MTUzMDUxMjg5NzQ0NzAifX0.ojbUOy2tF6HL8fIp1FJAQchU2MimlVMr3EGVPxMvYyahpW5YsWh6mz7qn4vpEnBuYZDf6cTaN50pJ8krlDX9RqtxF3iEfV2ZYHwyKMThI9sWh_kEBgGwUpyHyk98ZeqQX1uFOH3iwwhR-lPPUlpgdFGzKsxfxeFLOtu1y0V7BgA08KFqgYzl0lJqDYWBkJh_wUAv5g_r0NzSQCsMqb-B3Lno5ScMnlA3SZ_Hg-XdW8hnFIlrwJj4Cv47j3fcZxpqLbTNDXWWogmRbJb3YPlgn_LEnRAyZnFERHKMCE9vaBSTu-1Qstp-gRTORjyV7l3y680dEygQS-99KV3OSBlz6g", + "token_type":"bearer", + 
"refresh_token":"eyJhbGciOiJSUzI1NiIsImtleV9pZCI6IjlLZlNILXphZjRjY1dmTlNPbm91YmZUbnItVW5kMHVuY3ctWF9KNUJVdWciLCJ0eXAiOiJKV1QifQ.eyJhdWQiOlsiaHR0cHM6Ly9kZW1vLm51Y2x5ZGUuaW8iXSwiY2xpZW50X2lkIjoiZmx5dGVjdGwiLCJleHAiOjE2MTk1MzM1MjcsImZvcm0iOnsiY29kZV9jaGFsbGVuZ2UiOiJ2bWNxazArZnJRS3Vvb2FMUHZwUDJCeUtod2VKR2VaeG1mdGtkMml0T042Tk13SVBQNWwySmNpWDd3NTdlaS9iVW1LTWhPSjJVUERnK0F5RXRaTG94SFJiMDl1cWRKSSIsImNvZGVfY2hhbGxlbmdlX21ldGhvZCI6IlN2WEgyeDh2UDUrSkJxQ0NjT2dCL0hNWjdLSmE3bkdLMDBaUVA0ekd4WGcifSwiaWF0IjoxNjE5NTAyNTM1LCJpc3MiOiJodHRwczovL2RlbW8ubnVjbHlkZS5pbyIsImp0aSI6IjQzMTM1ZWY2LTA5NjEtNGFlZC1hOTYxLWQyZGI1YWJmM2U1YyIsInNjcCI6WyJvZmZsaW5lIiwiZi5hbGwiLCJhY2Nlc3NfdG9rZW4iXSwic3ViIjoiMTE0NTI3ODE1MzA1MTI4OTc0NDcwIiwidXNlcl9pbmZvIjp7ImZhbWlseV9uYW1lIjoiTWFoaW5kcmFrYXIiLCJnaXZlbl9uYW1lIjoiUHJhZnVsbGEiLCJuYW1lIjoiUHJhZnVsbGEgTWFoaW5kcmFrYXIiLCJwaWN0dXJlIjoiaHR0cHM6Ly9saDMuZ29vZ2xldXNlcmNvbnRlbnQuY29tL2EtL0FPaDE0R2p1VDFrOC04YTV2QkdPSUYxYURnaFltRng4aEQ5S05pUjVqblp1PXM5Ni1jIiwic3ViamVjdCI6IjExNDUyNzgxNTMwNTEyODk3NDQ3MCJ9fQ.YKom5-gE4e84rJJIfxcpbMzgjZT33UZ27UTa1y8pK2BAWaPjIZtwudwDHQ5Rd3m0mJJWhBp0j0e8h9DvzBUdpsnGMXSCYKP-ag9y9k5OW59FMm9RqIakWHtj6NPnxGO1jAsaNCYePj8knR7pBLCLCse2taDHUJ8RU1F0DeHNr2y-JupgG5y1vjBcb-9eD8OwOSTp686_hm7XoJlxiKx8dj2O7HPH7M2pAHA_0bVrKKj7Y_s3fRhkm_Aq6LRdA-IiTl9xJQxgVUreejls9-RR9mSTKj6A81-Isz3qAUttVVaA4OT5OdW879_yT7OSLw_QwpXzNZ7qOR7OIpmL_xZXig", + "expiry":"2021-04-27T19:55:26.658635+05:30" +} \ No newline at end of file diff --git a/flytectl/pkg/pkce/token_cache_keyring.go b/flytectl/pkg/pkce/token_cache_keyring.go new file mode 100644 index 0000000000..119fea5033 --- /dev/null +++ b/flytectl/pkg/pkce/token_cache_keyring.go @@ -0,0 +1,58 @@ +package pkce + +import ( + "encoding/json" + "fmt" + + "github.com/zalando/go-keyring" + "golang.org/x/oauth2" +) + +// TokenCacheKeyringProvider wraps the logic to save and retrieve tokens from the OS's keyring implementation. 
+type TokenCacheKeyringProvider struct { + ServiceName string + ServiceUser string +} + +const ( + KeyRingServiceUser = "flytectl-user" + KeyRingServiceName = "flytectl" +) + +func (t TokenCacheKeyringProvider) SaveToken(token *oauth2.Token) error { + var tokenBytes []byte + if token.AccessToken == "" { + return fmt.Errorf("cannot save empty token with expiration %v", token.Expiry) + } + + var err error + if tokenBytes, err = json.Marshal(token); err != nil { + return fmt.Errorf("unable to marshal token to save in cache due to %w", err) + } + + // set token in keyring + if err = keyring.Set(t.ServiceName, t.ServiceUser, string(tokenBytes)); err != nil { + return fmt.Errorf("unable to save token. Error: %w", err) + } + + return nil +} + +func (t TokenCacheKeyringProvider) GetToken() (*oauth2.Token, error) { + // get saved token + tokenJSON, err := keyring.Get(t.ServiceName, t.ServiceUser) + if len(tokenJSON) == 0 { + return nil, fmt.Errorf("no token found in the cache") + } + + if err != nil { + return nil, err + } + + token := oauth2.Token{} + if err = json.Unmarshal([]byte(tokenJSON), &token); err != nil { + return nil, fmt.Errorf("unmarshalling error for saved token. 
Error: %w", err) + } + + return &token, nil +} diff --git a/flytectl/pkg/pkce/token_cache_keyring_test.go b/flytectl/pkg/pkce/token_cache_keyring_test.go new file mode 100644 index 0000000000..11946b677d --- /dev/null +++ b/flytectl/pkg/pkce/token_cache_keyring_test.go @@ -0,0 +1,64 @@ +package pkce + +import ( + "encoding/json" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/zalando/go-keyring" + "golang.org/x/oauth2" +) + +func TestSaveAndGetToken(t *testing.T) { + keyring.MockInit() + tokenCacheProvider := TokenCacheKeyringProvider{ + ServiceUser: "testServiceUser", + ServiceName: "testServiceName", + } + + t.Run("Valid Save/Get Token", func(t *testing.T) { + plan, _ := ioutil.ReadFile("testdata/token.json") + var tokenData oauth2.Token + err := json.Unmarshal(plan, &tokenData) + assert.NoError(t, err) + err = tokenCacheProvider.SaveToken(&tokenData) + assert.NoError(t, err) + var savedToken *oauth2.Token + savedToken, err = tokenCacheProvider.GetToken() + assert.NoError(t, err) + assert.NotNil(t, savedToken) + assert.Equal(t, tokenData.AccessToken, savedToken.AccessToken) + assert.Equal(t, tokenData.TokenType, savedToken.TokenType) + assert.Equal(t, tokenData.Expiry, savedToken.Expiry) + }) + + t.Run("Empty access token Save", func(t *testing.T) { + plan, _ := ioutil.ReadFile("testdata/empty_access_token.json") + var tokenData oauth2.Token + var err error + err = json.Unmarshal(plan, &tokenData) + assert.NoError(t, err) + + err = tokenCacheProvider.SaveToken(&tokenData) + assert.Error(t, err) + }) + + t.Run("Different service name", func(t *testing.T) { + plan, _ := ioutil.ReadFile("testdata/token.json") + var tokenData oauth2.Token + err := json.Unmarshal(plan, &tokenData) + assert.NoError(t, err) + err = tokenCacheProvider.SaveToken(&tokenData) + assert.NoError(t, err) + tokenCacheProvider2 := TokenCacheKeyringProvider{ + ServiceUser: "testServiceUser2", + ServiceName: "testServiceName2", + } + + var savedToken *oauth2.Token + 
savedToken, err = tokenCacheProvider2.GetToken() + assert.Error(t, err) + assert.Nil(t, savedToken) + }) +} diff --git a/flytectl/pkg/platformutil/platformutil.go b/flytectl/pkg/platformutil/platformutil.go new file mode 100644 index 0000000000..064a6d8e14 --- /dev/null +++ b/flytectl/pkg/platformutil/platformutil.go @@ -0,0 +1,26 @@ +package platformutil + +type Arch string + +const ( + ArchAmd64 Arch = "amd64" + ArchX86 Arch = "x86_64" + Arch386 Arch = "386" + Archi386 Arch = "i386" +) + +func (a Arch) String() string { + return string(a) +} + +type Platform string + +const ( + Windows Platform = "windows" + Linux Platform = "linux" + Darwin Platform = "darwin" +) + +func (p Platform) String() string { + return string(p) +} diff --git a/flytectl/pkg/platformutil/platformutil_test.go b/flytectl/pkg/platformutil/platformutil_test.go new file mode 100644 index 0000000000..8c7ee7f81d --- /dev/null +++ b/flytectl/pkg/platformutil/platformutil_test.go @@ -0,0 +1,39 @@ +package platformutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestArch(t *testing.T) { + var amd64 = ArchAmd64 + assert.NotNil(t, amd64) + assert.Equal(t, "amd64", amd64.String()) + + var arch386 = Arch386 + assert.NotNil(t, arch386) + assert.Equal(t, "386", arch386.String()) + + var i386 = Archi386 + assert.NotNil(t, i386) + assert.Equal(t, "i386", i386.String()) + + var x8664 = ArchX86 + assert.NotNil(t, x8664) + assert.Equal(t, "x86_64", x8664.String()) +} + +func TestGoosEnum(t *testing.T) { + var linux = Linux + assert.NotNil(t, linux) + assert.Equal(t, "linux", linux.String()) + + var windows = Windows + assert.NotNil(t, windows) + assert.Equal(t, "windows", windows.String()) + + var darwin = Darwin + assert.NotNil(t, darwin) + assert.Equal(t, "darwin", darwin.String()) +} diff --git a/flytectl/pkg/printer/outputformat_enumer.go b/flytectl/pkg/printer/outputformat_enumer.go new file mode 100644 index 0000000000..9085e9209d --- /dev/null +++ 
b/flytectl/pkg/printer/outputformat_enumer.go @@ -0,0 +1,87 @@ +// Code generated by "enumer --type=OutputFormat -json -yaml -trimprefix=OutputFormat"; DO NOT EDIT. + +package printer + +import ( + "encoding/json" + "fmt" +) + +const _OutputFormatName = "TABLEJSONYAMLDOTDOTURL" + +var _OutputFormatIndex = [...]uint8{0, 5, 9, 13, 16, 22} + +func (i OutputFormat) String() string { + if i >= OutputFormat(len(_OutputFormatIndex)-1) { + return fmt.Sprintf("OutputFormat(%d)", i) + } + return _OutputFormatName[_OutputFormatIndex[i]:_OutputFormatIndex[i+1]] +} + +var _OutputFormatValues = []OutputFormat{0, 1, 2, 3, 4} + +var _OutputFormatNameToValueMap = map[string]OutputFormat{ + _OutputFormatName[0:5]: 0, + _OutputFormatName[5:9]: 1, + _OutputFormatName[9:13]: 2, + _OutputFormatName[13:16]: 3, + _OutputFormatName[16:22]: 4, +} + +// OutputFormatString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func OutputFormatString(s string) (OutputFormat, error) { + if val, ok := _OutputFormatNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to OutputFormat values", s) +} + +// OutputFormatValues returns all values of the enum +func OutputFormatValues() []OutputFormat { + return _OutputFormatValues +} + +// IsAOutputFormat returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i OutputFormat) IsAOutputFormat() bool { + for _, v := range _OutputFormatValues { + if i == v { + return true + } + } + return false +} + +// MarshalJSON implements the json.Marshaler interface for OutputFormat +func (i OutputFormat) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for OutputFormat +func (i *OutputFormat) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("OutputFormat should be a string, got %s", data) + } + + var err error + *i, err = OutputFormatString(s) + return err +} + +// MarshalYAML implements a YAML Marshaler for OutputFormat +func (i OutputFormat) MarshalYAML() (interface{}, error) { + return i.String(), nil +} + +// UnmarshalYAML implements a YAML Unmarshaler for OutputFormat +func (i *OutputFormat) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + + var err error + *i, err = OutputFormatString(s) + return err +} diff --git a/flytectl/pkg/printer/printer.go b/flytectl/pkg/printer/printer.go new file mode 100644 index 0000000000..6dcf98d73e --- /dev/null +++ b/flytectl/pkg/printer/printer.go @@ -0,0 +1,311 @@ +package printer + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "sort" + "strings" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/errors" + "github.com/flyteorg/flytectl/pkg/visualize" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "github.com/kataras/tablewriter" + "github.com/landoop/tableprinter" + "github.com/pkg/browser" + "github.com/yalp/jsonpath" + "sigs.k8s.io/yaml" +) + +//go:generate enumer --type=OutputFormat -json -yaml -trimprefix=OutputFormat +type OutputFormat uint8 + +const ( + 
OutputFormatTABLE OutputFormat = iota + OutputFormatJSON + OutputFormatYAML + OutputFormatDOT + OutputFormatDOTURL +) + +// Set implements PFlag's Value interface to attempt to set the value of the flag from string. +func (i *OutputFormat) Set(val string) error { + policy, err := OutputFormatString(val) + if err != nil { + return err + } + + *i = policy + return nil +} + +// Type implements PFlag's Value interface to return type name. +func (i OutputFormat) Type() string { + return "OutputFormat" +} + +const GraphVisualizationServiceURL = "http://graph.flyte.org/#" + +func OutputFormats() []string { + var v []string + for _, o := range OutputFormatValues() { + v = append(v, o.String()) + } + return v +} + +type Column struct { + Header string + JSONPath string + // Optional Truncation directive to limit content. This will simply truncate the string output. + TruncateTo *int +} + +type Printer struct{} + +const ( + empty = "" + tab = "\t" + DefaultFormattedDescriptionsKey = "_formatted_descriptions" + defaultLineWidth = 25 +) + +// Projects the columns in one row of data from the given JSON using the []Column map +func extractRow(data interface{}, columns []Column) []string { + if columns == nil || data == nil { + return nil + } + tableData := make([]string, 0, len(columns)) + + for _, c := range columns { + out, err := jsonpath.Read(data, c.JSONPath) + if err != nil || out == nil { + out = "" + } + s := fmt.Sprintf("%s", out) + if c.TruncateTo != nil { + t := *c.TruncateTo + if len(s) > t { + s = s[:t] + } + } + tableData = append(tableData, s) + } + return tableData +} + +// Projects the columns from the given list of JSON elements using the []Column map +// Potential performance problem, as it returns all the rows in memory. +// We could use the render row, but that may lead to misalignment. 
+// TODO figure out a more optimal way +func projectColumns(rows []interface{}, column []Column) [][]string { + responses := make([][]string, 0, len(rows)) + for _, row := range rows { + responses = append(responses, extractRow(row, column)) + } + return responses +} + +func (p Printer) JSONToTable(w io.Writer, jsonRows []byte, columns []Column) error { + var rawRows []interface{} + if err := json.Unmarshal(jsonRows, &rawRows); err != nil { + return errors.Wrapf("JSONUnmarshalFailure", err, "failed to unmarshal into []interface{} from json") + } + if rawRows == nil { + return errors.Errorf("JSONUnmarshalNil", "expected one row or empty rows, received nil") + } + rows := projectColumns(rawRows, columns) + + printer := tableprinter.New(w) + // TODO make this configurable + printer.AutoWrapText = false + printer.BorderLeft = true + printer.BorderRight = true + printer.BorderBottom = true + printer.BorderTop = true + printer.RowLine = true + printer.ColumnSeparator = "|" + printer.HeaderBgColor = tablewriter.BgHiWhiteColor + headers := make([]string, 0, len(columns)) + positions := make([]int, 0, len(columns)) + for _, c := range columns { + headers = append(headers, c.Header) + positions = append(positions, 30) + } + if r := printer.Render(headers, rows, positions, true); r == -1 { + return fmt.Errorf("failed to render table") + } + if w == os.Stdout { + fmt.Printf("%d rows\n", len(rows)) + } + return nil +} + +func (p Printer) PrintInterface(format OutputFormat, columns []Column, v interface{}) error { + jsonRows, err := json.Marshal(v) + if err != nil { + return err + } + // Factory Method for all printer + switch format { + case OutputFormatJSON, OutputFormatYAML: + return printJSONYaml(format, v) + default: // Print table + return p.JSONToTable(os.Stdout, jsonRows, columns) + } +} + +// printJSONYaml internal function for printing +func printJSONYaml(format OutputFormat, v interface{}) error { + if format != OutputFormatJSON && format != OutputFormatYAML { + 
return fmt.Errorf("this function should be called only for json/yaml printing") + } + buf := new(bytes.Buffer) + encoder := json.NewEncoder(buf) + encoder.SetIndent(empty, tab) + + err := encoder.Encode(v) + if err != nil { + return err + } + + if format == OutputFormatJSON { + fmt.Println(buf.String()) + } else { + v, err := yaml.JSONToYAML(buf.Bytes()) + if err != nil { + return err + } + fmt.Println(string(v)) + } + return nil +} + +func FormatVariableDescriptions(variableMap map[string]*core.Variable) { + keys := make([]string, 0, len(variableMap)) + // sort the keys for testing and consistency with other output formats + for k := range variableMap { + keys = append(keys, k) + } + sort.Strings(keys) + + var descriptions []string + for _, k := range keys { + v := variableMap[k] + // a: a isn't very helpful + if k != v.Description { + descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.Description))) + } else { + descriptions = append(descriptions, getTruncatedLine(k)) + } + + } + variableMap[DefaultFormattedDescriptionsKey] = &core.Variable{Description: strings.Join(descriptions, "\n")} +} + +func FormatParameterDescriptions(parameterMap map[string]*core.Parameter) { + keys := make([]string, 0, len(parameterMap)) + // sort the keys for testing and consistency with other output formats + for k := range parameterMap { + keys = append(keys, k) + } + sort.Strings(keys) + + var descriptions []string + for _, k := range keys { + v := parameterMap[k] + if v.Var == nil { + continue + } + // a: a isn't very helpful + if k != v.Var.Description { + descriptions = append(descriptions, getTruncatedLine(fmt.Sprintf("%s: %s", k, v.Var.Description))) + } else { + descriptions = append(descriptions, getTruncatedLine(k)) + } + } + parameterMap[DefaultFormattedDescriptionsKey] = &core.Parameter{Var: &core.Variable{Description: strings.Join(descriptions, "\n")}} +} + +func getTruncatedLine(line string) string { + // TODO: maybe add width to function 
signature later + width := defaultLineWidth + if len(line) > width { + return line[:width-3] + "..." + } + return line +} + +func (p Printer) Print(format OutputFormat, columns []Column, messages ...proto.Message) error { + + printableMessages := make([]*PrintableProto, 0, len(messages)) + for _, m := range messages { + printableMessages = append(printableMessages, &PrintableProto{Message: m}) + } + + // Factory Method for all printer + switch format { + case OutputFormatJSON, OutputFormatYAML: // Print protobuf to json + var v interface{} + if len(printableMessages) == 1 { + v = printableMessages[0] + } else { + v = printableMessages + } + return printJSONYaml(format, v) + case OutputFormatDOT, OutputFormatDOTURL: + var workflows []*admin.Workflow + for _, m := range messages { + if w, ok := m.(*admin.Workflow); ok { + workflows = append(workflows, w) + } else { + return fmt.Errorf("visualization is only supported on workflows") + } + } + if len(workflows) == 0 { + return fmt.Errorf("atleast one workflow required for visualization") + } + workflow := workflows[0] + graphStr, err := visualize.RenderWorkflow(workflow.Closure.CompiledWorkflow) + if err != nil { + return errors.Wrapf("VisualizationError", err, "failed to visualize workflow") + } + if format == OutputFormatDOTURL { + urlToOpen := GraphVisualizationServiceURL + url.PathEscape(graphStr) + fmt.Println("Opening the browser at " + urlToOpen) + return browser.OpenURL(urlToOpen) + } + fmt.Println(graphStr) + default: // Print table + rows, err := json.Marshal(printableMessages) + if err != nil { + return errors.Wrapf("ProtoToJSONFailure", err, "failed to marshal proto messages") + } + return p.JSONToTable(os.Stdout, rows, columns) + } + return nil +} + +type PrintableProto struct { + proto.Message +} + +var marshaller = jsonpb.Marshaler{ + Indent: tab, +} + +func (p PrintableProto) MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + err := marshaller.Marshal(buf, p.Message) + if err != nil { + return 
nil, err + } + return buf.Bytes(), nil +} diff --git a/flytectl/pkg/printer/printer_test.go b/flytectl/pkg/printer/printer_test.go new file mode 100644 index 0000000000..eb4960a878 --- /dev/null +++ b/flytectl/pkg/printer/printer_test.go @@ -0,0 +1,310 @@ +package printer + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "testing" + "time" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/stretchr/testify/assert" +) + +type Inner struct { + X string `json:"x"` + Y *time.Time `json:"y"` +} + +func LaunchplanToProtoMessages(l []*admin.LaunchPlan) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +func WorkflowToProtoMessages(l []*admin.Workflow) []proto.Message { + messages := make([]proto.Message, 0, len(l)) + for _, m := range l { + messages = append(messages, m) + } + return messages +} + +// TODO Convert this to a Testable Example. 
For some reason the comparison fails +func TestJSONToTable(t *testing.T) { + trunc := 5 + d := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + j := []struct { + A string `json:"a"` + B int `json:"b"` + S *Inner `json:"s"` + }{ + {"hello", 0, &Inner{"x-hello", nil}}, + {"hello world", 0, &Inner{"x-hello", &d}}, + {"hello", 0, nil}, + } + + b, err := json.Marshal(j) + assert.NoError(t, err) + p := Printer{} + assert.NoError(t, p.JSONToTable(os.Stdout, b, []Column{ + {"A", "$.a", &trunc}, + {"S", "$.s.y", nil}, + })) + // Output: + // | A | S | + // ------- ---------------------- + // | hello | | + // | hello | 2020-01-01T00:00:00Z | + // | hello | | + // 3 rows +} + +func TestOutputFormats(t *testing.T) { + expected := []string{"TABLE", "JSON", "YAML", "DOT", "DOTURL"} + outputs := OutputFormats() + assert.Equal(t, 5, len(outputs)) + assert.Equal(t, expected, outputs) +} + +func TestOutputFormatString(t *testing.T) { + o, err := OutputFormatString("JSON") + assert.Nil(t, err) + assert.Equal(t, OutputFormat(1), o) +} + +func TestOutputFormatStringErr(t *testing.T) { + o, err := OutputFormatString("FLYTE") + assert.NotNil(t, err) + assert.Equal(t, OutputFormat(0), o) + assert.Equal(t, fmt.Errorf("%s does not belong to OutputFormat values", "FLYTE"), err) +} + +func TestIsAOutputFormat(t *testing.T) { + o := OutputFormat(5) + check := o.IsAOutputFormat() + assert.Equal(t, false, check) + + o = OutputFormat(1) + check = o.IsAOutputFormat() + assert.Equal(t, true, check) +} + +func TestMarshalJson(t *testing.T) { + o := OutputFormat(1) + check, err := o.MarshalJSON() + assert.Nil(t, err) + assert.Equal(t, []byte(`"JSON"`), check) + + result, err := o.MarshalYAML() + assert.Nil(t, err) + assert.Equal(t, "JSON", result) +} + +func TestPrint(t *testing.T) { + p := Printer{} + lp := []Column{ + {Header: "Version", JSONPath: "$.id.version"}, + {Header: "Name", JSONPath: "$.id.name"}, + } + launchPlan := &admin.LaunchPlan{ + Id: &core.Identifier{ + Name: "launchplan1", + Version: 
"v2", + }, + Spec: &admin.LaunchPlanSpec{ + DefaultInputs: &core.ParameterMap{}, + }, + Closure: &admin.LaunchPlanClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + ExpectedInputs: &core.ParameterMap{}, + }, + } + launchPlans := []*admin.LaunchPlan{launchPlan} + err := p.Print(OutputFormat(0), lp, LaunchplanToProtoMessages(launchPlans)...) + assert.Nil(t, err) + err = p.Print(OutputFormat(1), lp, LaunchplanToProtoMessages(launchPlans)...) + assert.Nil(t, err) + err = p.Print(OutputFormat(2), lp, LaunchplanToProtoMessages(launchPlans)...) + assert.Nil(t, err) + err = p.Print(OutputFormat(3), lp, LaunchplanToProtoMessages(launchPlans)...) + assert.NotNil(t, err) + err = p.Print(OutputFormat(4), lp, LaunchplanToProtoMessages(launchPlans)...) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("visualization is only supported on workflows"), err) + + sortedListLiteralType := core.Variable{ + Type: &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + } + variableMap := map[string]*core.Variable{ + "sorted_list1": &sortedListLiteralType, + "sorted_list2": &sortedListLiteralType, + } + + var compiledTasks []*core.CompiledTask + compiledTasks = append(compiledTasks, &core.CompiledTask{ + Template: &core.TaskTemplate{ + Id: &core.Identifier{ + Project: "dummyProject", + Domain: "dummyDomain", + Name: "dummyName", + Version: "dummyVersion", + }, + Interface: &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: variableMap, + }, + }, + }, + }) + + workflow1 := &admin.Workflow{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Tasks: compiledTasks, + }, + }, + } + + workflows := []*admin.Workflow{workflow1} + + err = p.Print(OutputFormat(3), lp, 
WorkflowToProtoMessages(workflows)...) + assert.Nil(t, err) + workflows = []*admin.Workflow{} + err = p.Print(OutputFormat(3), lp, WorkflowToProtoMessages(workflows)...) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("atleast one workflow required for visualization"), err) + var badCompiledTasks []*core.CompiledTask + badCompiledTasks = append(badCompiledTasks, &core.CompiledTask{ + Template: &core.TaskTemplate{}, + }) + badWorkflow := &admin.Workflow{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Tasks: badCompiledTasks, + }, + }, + } + workflows = []*admin.Workflow{badWorkflow} + err = p.Print(OutputFormat(3), lp, WorkflowToProtoMessages(workflows)...) + assert.NotNil(t, err) + + assert.Equal(t, fmt.Errorf("no template found in the workflow task template:<> "), errors.Unwrap(err)) + + badWorkflow2 := &admin.Workflow{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledWorkflow: nil, + }, + } + workflows = []*admin.Workflow{badWorkflow2} + err = p.Print(OutputFormat(3), lp, WorkflowToProtoMessages(workflows)...) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("empty workflow closure"), errors.Unwrap(err)) + + var badSubWorkflow []*core.CompiledWorkflow + badSubWorkflow = append(badSubWorkflow, &core.CompiledWorkflow{ + Template: &core.WorkflowTemplate{}, + }) + + badWorkflow3 := &admin.Workflow{ + Id: &core.Identifier{ + Name: "task1", + Version: "v1", + }, + Closure: &admin.WorkflowClosure{ + CreatedAt: ×tamppb.Timestamp{Seconds: 1, Nanos: 0}, + CompiledWorkflow: &core.CompiledWorkflowClosure{ + Tasks: compiledTasks, + SubWorkflows: badSubWorkflow, + }, + }, + } + workflows = []*admin.Workflow{badWorkflow3} + err = p.Print(OutputFormat(3), lp, WorkflowToProtoMessages(workflows)...) 
+	assert.NotNil(t, err)
+	assert.Equal(t, fmt.Errorf("no template found in the sub workflow template:<> "), errors.Unwrap(err))
+}
+
+// TestGetTruncatedLine verifies that short strings (up to 25 chars) pass
+// through unchanged while longer ones are truncated to 22 chars plus "...".
+func TestGetTruncatedLine(t *testing.T) {
+	// key = input, value = expected truncated output
+	testStrings := map[string]string{
+		"foo":               "foo",
+		"":                  "",
+		"short description": "short description",
+		"1234567890123456789012345":  "1234567890123456789012345",
+		"12345678901234567890123456": "1234567890123456789012...",
+		"long description probably needs truncate": "long description proba...",
+	}
+	for k, v := range testStrings {
+		assert.Equal(t, v, getTruncatedLine(k))
+	}
+}
+
+// TestFormatVariableDescriptions verifies that FormatVariableDescriptions
+// folds the per-variable descriptions (ordered by key) into a single
+// aggregate entry stored under DefaultFormattedDescriptionsKey.
+func TestFormatVariableDescriptions(t *testing.T) {
+	fooVar := &core.Variable{
+		Description: "foo",
+	}
+	barVar := &core.Variable{
+		Description: "bar",
+	}
+	variableMap := map[string]*core.Variable{
+		"var1": fooVar,
+		"var2": barVar,
+		"foo":  fooVar,
+		"bar":  barVar,
+	}
+	FormatVariableDescriptions(variableMap)
+	// Entries appear sorted by key; see the expected string for the exact layout.
+	assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", variableMap[DefaultFormattedDescriptionsKey].Description)
+}
+
+// TestFormatParameterDescriptions is the Parameter analogue of the variable
+// test above; a parameter with no Var (emptyParam) must be tolerated.
+func TestFormatParameterDescriptions(t *testing.T) {
+	fooParam := &core.Parameter{
+		Var: &core.Variable{
+			Description: "foo",
+		},
+	}
+	barParam := &core.Parameter{
+		Var: &core.Variable{
+			Description: "bar",
+		},
+	}
+	emptyParam := &core.Parameter{}
+	paramMap := map[string]*core.Parameter{
+		"var1":  fooParam,
+		"var2":  barParam,
+		"foo":   fooParam,
+		"bar":   barParam,
+		"empty": emptyParam,
+	}
+	FormatParameterDescriptions(paramMap)
+	assert.Equal(t, "bar\nfoo\nvar1: foo\nvar2: bar", paramMap[DefaultFormattedDescriptionsKey].Var.Description)
+}
diff --git a/flytectl/pkg/sandbox/start.go b/flytectl/pkg/sandbox/start.go
new file mode 100644
index 0000000000..7ba775f702
--- /dev/null
+++ b/flytectl/pkg/sandbox/start.go
@@ -0,0 +1,433 @@
+package sandbox
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/apoorvam/goterminal"
+	"github.com/avast/retry-go"
+	"github.com/docker/docker/api/types/mount"
+
	"github.com/docker/go-connections/nat"
+	"github.com/enescakir/emoji"
+	"github.com/flyteorg/flyte/flytestdlib/logger"
+	"github.com/flyteorg/flytectl/clierrors"
+	dockerCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/docker"
+	sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox"
+	"github.com/flyteorg/flytectl/pkg/configutil"
+	"github.com/flyteorg/flytectl/pkg/docker"
+	"github.com/flyteorg/flytectl/pkg/github"
+	"github.com/flyteorg/flytectl/pkg/k8s"
+	"github.com/flyteorg/flytectl/pkg/util"
+	"github.com/kataras/tablewriter"
+	corev1api "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+const (
+	// Namespace where all Flyte pods are deployed in the sandbox cluster.
+	flyteNamespace       = "flyte"
+	// Taint key/effect used to detect a disk-starved sandbox node.
+	diskPressureTaint    = "node.kubernetes.io/disk-pressure"
+	taintEffect          = "NoSchedule"
+	sandboxContextName   = "flyte-sandbox"
+	sandboxDockerContext = "default"
+	// K8s API endpoints: the demo (bundled) cluster vs. the legacy sandbox.
+	K8sEndpoint          = "https://127.0.0.1:6443"
+	sandboxK8sEndpoint   = "https://127.0.0.1:30086"
+	sandboxImageName     = "cr.flyte.org/flyteorg/flyte-sandbox"
+	demoImageName        = "cr.flyte.org/flyteorg/flyte-sandbox-bundled"
+	DefaultFlyteConfig   = "/opt/flyte/defaults.flyte.yaml"
+	// Env var telling k3s where to write its kubeconfig inside the container.
+	k3sKubeConfigEnvVar  = "K3S_KUBECONFIG_OUTPUT=/var/lib/flyte/config/kubeconfig"
+)
+
+// isNodeTainted reports whether any cluster node carries the
+// disk-pressure/NoSchedule taint. Callers treat this as a signal that the
+// docker sandbox has run out of disk space.
+func isNodeTainted(ctx context.Context, client corev1.CoreV1Interface) (bool, error) {
+	nodes, err := client.Nodes().List(ctx, v1.ListOptions{})
+	if err != nil {
+		return false, err
+	}
+	match := 0
+	for _, node := range nodes.Items {
+		for _, c := range node.Spec.Taints {
+			if c.Key == diskPressureTaint && c.Effect == taintEffect {
+				match++
+			}
+		}
+	}
+	if match > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
+// getFlyteDeployment lists all pods in the "flyte" namespace.
+func getFlyteDeployment(ctx context.Context, client corev1.CoreV1Interface) (*corev1api.PodList, error) {
+	pods, err := client.Pods(flyteNamespace).List(ctx, v1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return pods, nil
+}
+
+// WatchFlyteDeployment polls the flyte namespace every 5 seconds, rendering a
+// table of pod name/phase/namespace to the terminal. It returns an error if a
+// node is disk-pressure tainted, and returns nil once every pod reports the
+// Ready condition.
+func WatchFlyteDeployment(ctx context.Context, appsClient corev1.CoreV1Interface) error {
+	writer := goterminal.New(os.Stdout)
+	defer writer.Reset()
+
+	table := tablewriter.NewWriter(writer)
+	table.SetHeader([]string{"Service", "Status", "Namespace"})
+	table.SetRowLine(true)
+
+	done := false
+	for {
+		isTaint, err := isNodeTainted(ctx, appsClient)
+		if err != nil {
+			return err
+		}
+		if isTaint {
+			return fmt.Errorf("docker sandbox doesn't have sufficient memory available. Please run docker system prune -a --volumes")
+		}
+
+		pods, err := getFlyteDeployment(ctx, appsClient)
+		if err != nil {
+			return err
+		}
+		table.ClearRows()
+		table.SetAutoWrapText(false)
+		table.SetAutoFormatHeaders(true)
+
+		// Count pods that have reached the Ready condition.
+		var total, ready int
+		total = len(pods.Items)
+		ready = 0
+		if total != 0 {
+			for _, v := range pods.Items {
+				for _, condition := range v.Status.Conditions {
+					if string(condition.Type) == string(corev1api.PodReady) && condition.Status == corev1api.ConditionTrue {
+						ready++
+						break
+					}
+				}
+
+				// Only show pods that have reported at least one condition.
+				if len(v.Status.Conditions) > 0 {
+					table.Append([]string{v.GetName(), string(v.Status.Phase), v.GetNamespace()})
+				}
+			}
+			table.Render()
+			if total == ready {
+				done = true
+			}
+		} else {
+			// No pods yet: the cluster is still bootstrapping.
+			table.Append([]string{"k8s: This might take a little bit", "Bootstrapping", ""})
+			table.Render()
+		}
+
+		writer.Clear()
+		writer.Print()
+
+		if done {
+			break
+		}
+
+		time.Sleep(5 * time.Second)
+	}
+
+	return nil
+}
+
+// MountVolume builds a bind mount of the (absolutized) host path `file` at
+// `destination` inside the container. Returns (nil, nil) when file is empty.
+func MountVolume(file, destination string) (*mount.Mount, error) {
+	if len(file) > 0 {
+		source, err := filepath.Abs(file)
+		if err != nil {
+			return nil, err
+		}
+		return &mount.Mount{
+			Type:   mount.TypeBind,
+			Source: source,
+			Target: destination,
+		}, nil
+	}
+	return nil, nil
+}
+
+// UpdateLocalKubeContext copies the sandbox cluster's context out of
+// kubeConfigPath into the user's main kubeconfig under contextName.
+func UpdateLocalKubeContext(k8sCtxMgr k8s.ContextOps, dockerCtx string, contextName string, kubeConfigPath string) error {
+	srcConfigAccess := &clientcmd.PathOptions{
+		GlobalFile:   kubeConfigPath,
+		LoadingRules: clientcmd.NewDefaultClientConfigLoadingRules(),
+	}
+	return k8sCtxMgr.CopyContext(srcConfigAccess,
dockerCtx, contextName, flyteNamespace)
+}
+
+// startSandbox pulls (if needed) and starts the sandbox container, writing the
+// flytectl config first. Returns a scanner over the container logs, or
+// (nil, nil) when an existing sandbox is kept (user declined removal).
+func startSandbox(ctx context.Context, cli docker.Docker, g github.GHRepoService, reader io.Reader, sandboxConfig *sandboxCmdConfig.Config, defaultImageName string, defaultImagePrefix string, exposedPorts map[nat.Port]struct{}, portBindings map[nat.Port][]nat.PortBinding, consolePort int) (*bufio.Scanner, error) {
+	fmt.Printf("%v Bootstrapping a brand new Flyte cluster... %v %v\n", emoji.FactoryWorker, emoji.Hammer, emoji.Wrench)
+	if sandboxConfig.DryRun {
+		docker.PrintRemoveContainer(docker.FlyteSandboxClusterName)
+	} else {
+		dockerCmdConfig.DefaultConfig.Force = sandboxConfig.Force
+		if err := docker.RemoveSandbox(ctx, cli, reader); err != nil {
+			if err.Error() != clierrors.ErrSandboxExists {
+				return nil, err
+			}
+			// A sandbox already exists and the user chose to keep it.
+			fmt.Printf("Existing details of your sandbox")
+			util.PrintSandboxStartMessage(consolePort, docker.Kubeconfig, sandboxConfig.DryRun)
+			return nil, nil
+		}
+	}
+
+	// Write the flytectl config pointing at the sandbox endpoints.
+	templateValues := configutil.ConfigTemplateSpec{
+		Host:     "localhost:30080",
+		Insecure: true,
+		DataConfig: &configutil.DataConfig{
+			Endpoint:  "http://localhost:30002",
+			AccessKey: "minio",
+			SecretKey: "miniostorage",
+		},
+	}
+	if err := configutil.SetupConfig(configutil.FlytectlConfig, configutil.GetTemplate(), templateValues); err != nil {
+		return nil, err
+	}
+
+	volumes := docker.Volumes
+	// Mount this even though it should no longer be necessary. This is for user code
+	if vol, err := MountVolume(sandboxConfig.DeprecatedSource, docker.Source); err != nil {
+		return nil, err
+	} else if vol != nil {
+		volumes = append(volumes, *vol)
+	}
+
+	// This is the sandbox configuration directory mount, flyte will write the kubeconfig here. May hold more in future releases
+	// To be interoperable with the old sandbox, only mount if the directory exists, should've created by StartCluster
+	if fileInfo, err := os.Stat(docker.FlyteSandboxConfigDir); err == nil {
+		if fileInfo.IsDir() {
+			if vol, err := MountVolume(docker.FlyteSandboxConfigDir, docker.FlyteSandboxInternalConfigDir); err != nil {
+				return nil, err
+			} else if vol != nil {
+				volumes = append(volumes, *vol)
+			}
+		}
+	}
+
+	// Create and mount a docker volume that will be used to persist data
+	// across sandbox clusters
+	if _, err := docker.GetOrCreateVolume(
+		ctx,
+		cli,
+		docker.FlyteSandboxVolumeName,
+		sandboxConfig.DryRun,
+	); err != nil {
+		return nil, err
+	}
+	volumes = append(volumes, mount.Mount{
+		Type:   mount.TypeVolume,
+		Source: docker.FlyteSandboxVolumeName,
+		Target: docker.FlyteSandboxInternalStorageDir,
+	})
+
+	// Resolve the image: explicit config wins, otherwise derive from the
+	// requested release version via the GitHub repo service.
+	sandboxImage := sandboxConfig.Image
+	if len(sandboxImage) == 0 {
+		image, version, err := github.GetFullyQualifiedImageName(defaultImagePrefix, sandboxConfig.Version, defaultImageName, sandboxConfig.Prerelease, g)
+		if err != nil {
+			return nil, err
+		}
+		sandboxImage = image
+		fmt.Printf("%v Going to use Flyte %s release with image %s \n", emoji.Whale, version, image)
+	}
+	if err := docker.PullDockerImage(ctx, cli, sandboxImage, sandboxConfig.ImagePullPolicy, sandboxConfig.ImagePullOptions, sandboxConfig.DryRun); err != nil {
+		return nil, err
+	}
+	sandboxEnv := sandboxConfig.Env
+	if sandboxConfig.Dev {
+		sandboxEnv = append(sandboxEnv, "FLYTE_DEV=True")
+	}
+
+	if sandboxConfig.DisableAgent {
+		sandboxEnv = append(sandboxEnv, "DISABLE_AGENT=True")
+	}
+
+	ID, err := docker.StartContainer(ctx, cli, volumes, exposedPorts, portBindings, docker.FlyteSandboxClusterName,
+		sandboxImage, sandboxEnv, sandboxConfig.DryRun)
+
+	if err != nil {
+		fmt.Printf("%v Something went wrong: Failed to start Sandbox container %v, Please check your docker client and try again. \n", emoji.GrimacingFace, emoji.Whale)
+		return nil, err
+	}
+
+	var logReader *bufio.Scanner
+	if !sandboxConfig.DryRun {
+		logReader, err = docker.ReadLogs(ctx, cli, ID)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return logReader, nil
+}
+
+// StartCluster boots the bundled (demo) sandbox: starts the container, waits
+// for the k8s API to pass livez/readyz via retries, copies the kubeconfig into
+// the user's context, then watches the Flyte deployment until ready.
+func StartCluster(ctx context.Context, args []string, sandboxConfig *sandboxCmdConfig.Config, defaultImageName string, defaultImagePrefix string, exposedPorts map[nat.Port]struct{}, portBindings map[nat.Port][]nat.PortBinding, consolePort int) error {
+	k8sCtxMgr := k8s.NewK8sContextManager()
+	err := k8sCtxMgr.CheckConfig()
+	if err != nil {
+		return err
+	}
+
+	cli, err := docker.GetDockerClient()
+	if err != nil {
+		return err
+	}
+
+	ghRepo := github.GetGHRepoService()
+	if err := util.CreatePathAndFile(docker.Kubeconfig); err != nil {
+		return err
+	}
+
+	reader, err := startSandbox(ctx, cli, ghRepo, os.Stdin, sandboxConfig, defaultImageName, defaultImagePrefix, exposedPorts, portBindings, consolePort)
+	if err != nil {
+		return err
+	}
+
+	if reader != nil {
+		var k8sClient k8s.K8s
+		err = retry.Do(
+			func() error {
+				// This should wait for the kubeconfig file being there.
+				k8sClient, err = k8s.GetK8sClient(docker.Kubeconfig, K8sEndpoint)
+				return err
+			},
+			retry.Attempts(10),
+		)
+		if err != nil {
+			return err
+		}
+
+		// Live-ness check
+		fmt.Printf("%v Waiting for cluster to come up... %v\n", emoji.HourglassNotDone, emoji.HourglassNotDone)
+		err = retry.Do(
+			func() error {
+				// Have to get a new client every time because you run into x509 errors if not
+				k8sClient, err = k8s.GetK8sClient(docker.Kubeconfig, K8sEndpoint)
+				if err != nil {
+					logger.Debugf(ctx, "Error getting K8s client in liveness check %s", err)
+					return err
+				}
+				req := k8sClient.CoreV1().RESTClient().Get()
+				req = req.RequestURI("livez")
+				res := req.Do(ctx)
+				return res.Error()
+			},
+			retry.Attempts(15),
+		)
+		if err != nil {
+			return err
+		}
+
+		// Readiness check
+		err = retry.Do(
+			func() error {
+				// No need to refresh client here
+				req := k8sClient.CoreV1().RESTClient().Get()
+				req = req.RequestURI("readyz")
+				res := req.Do(ctx)
+				return res.Error()
+			},
+			retry.Attempts(10),
+		)
+		if err != nil {
+			return err
+		}
+
+		// This will copy the kubeconfig from where k3s writes it () to the main file.
+		// This code is located after the waits above since it appears that k3s goes through at least a couple versions
+		// of the config keys/certs. If this copy is done too early, the copied credentials won't work.
+		if err = UpdateLocalKubeContext(k8sCtxMgr, sandboxDockerContext, sandboxContextName, docker.Kubeconfig); err != nil {
+			return err
+		}
+
+		// Watch for Flyte Deployment
+		if err := WatchFlyteDeployment(ctx, k8sClient.CoreV1()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// StartClusterForSandbox is the code for the original multi deploy version of sandbox, should be removed once we
+// document the new development experience for plugins.
+func StartClusterForSandbox(ctx context.Context, args []string, sandboxConfig *sandboxCmdConfig.Config, defaultImageName string, defaultImagePrefix string, exposedPorts map[nat.Port]struct{}, portBindings map[nat.Port][]nat.PortBinding, consolePort int) error {
+	k8sCtxMgr := k8s.NewK8sContextManager()
+	err := k8sCtxMgr.CheckConfig()
+	if err != nil {
+		return err
+	}
+	cli, err := docker.GetDockerClient()
+	if err != nil {
+		return err
+	}
+
+	ghRepo := github.GetGHRepoService()
+
+	// Note: the legacy sandbox uses its own kubeconfig path and endpoint.
+	if err := util.CreatePathAndFile(docker.SandboxKubeconfig); err != nil {
+		return err
+	}
+
+	reader, err := startSandbox(ctx, cli, ghRepo, os.Stdin, sandboxConfig, defaultImageName, defaultImagePrefix, exposedPorts, portBindings, consolePort)
+	if err != nil {
+		return err
+	}
+	if reader != nil {
+		// Block until the container logs report the sandbox success message.
+		docker.WaitForSandbox(reader, docker.SuccessMessage)
+	}
+
+	if reader != nil {
+		var k8sClient k8s.K8s
+		err = retry.Do(
+			func() error {
+				k8sClient, err = k8s.GetK8sClient(docker.SandboxKubeconfig, sandboxK8sEndpoint)
+				return err
+			},
+			retry.Attempts(10),
+		)
+		if err != nil {
+			return err
+		}
+		if err = UpdateLocalKubeContext(k8sCtxMgr, sandboxDockerContext, sandboxContextName, docker.SandboxKubeconfig); err != nil {
+			return err
+		}
+
+		// TODO: This doesn't appear to correctly watch for the Flyte deployment but doesn't do so on master either.
+		if err := WatchFlyteDeployment(ctx, k8sClient.CoreV1()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// StartDemoCluster starts the bundled demo sandbox and prints the console
+// start message on success.
+func StartDemoCluster(ctx context.Context, args []string, sandboxConfig *sandboxCmdConfig.Config) error {
+	sandboxImagePrefix := "sha"
+	exposedPorts, portBindings, err := docker.GetDemoPorts()
+	if err != nil {
+		return err
+	}
+	// K3s will automatically write the file specified by this var, which is mounted from user's local state dir.
+
+	sandboxConfig.Env = append(sandboxConfig.Env, k3sKubeConfigEnvVar)
+	err = StartCluster(ctx, args, sandboxConfig, demoImageName, sandboxImagePrefix, exposedPorts, portBindings, util.DemoConsolePort)
+	if err != nil {
+		return err
+	}
+	util.PrintDemoStartMessage(util.DemoConsolePort, docker.Kubeconfig, sandboxConfig.DryRun)
+	return nil
+}
+
+// StartSandboxCluster starts the legacy (dind) sandbox and prints the console
+// start message on success.
+func StartSandboxCluster(ctx context.Context, args []string, sandboxConfig *sandboxCmdConfig.Config) error {
+	demoImagePrefix := "dind"
+	exposedPorts, portBindings, err := docker.GetSandboxPorts()
+	if err != nil {
+		return err
+	}
+	err = StartClusterForSandbox(ctx, args, sandboxConfig, sandboxImageName, demoImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort)
+	if err != nil {
+		return err
+	}
+	util.PrintSandboxStartMessage(util.SandBoxConsolePort, docker.SandboxKubeconfig, sandboxConfig.DryRun)
+	return nil
+}
diff --git a/flytectl/pkg/sandbox/start_test.go b/flytectl/pkg/sandbox/start_test.go
new file mode 100644
index 0000000000..1bfec25e26
--- /dev/null
+++ b/flytectl/pkg/sandbox/start_test.go
@@ -0,0 +1,453 @@
+package sandbox
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/flyteorg/flytectl/pkg/docker"
+	"github.com/flyteorg/flytectl/pkg/docker/mocks"
+	f "github.com/flyteorg/flytectl/pkg/filesystemutils"
+	ghutil "github.com/flyteorg/flytectl/pkg/github"
+	ghMocks "github.com/flyteorg/flytectl/pkg/github/mocks"
+	"github.com/flyteorg/flytectl/pkg/k8s"
+	k8sMocks "github.com/flyteorg/flytectl/pkg/k8s/mocks"
+	"github.com/flyteorg/flytectl/pkg/util"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/volume"
+	sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox"
+	"github.com/google/go-github/v42/github"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	corev1
"k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testclient "k8s.io/client-go/kubernetes/fake" +) + +var content = ` +apiVersion: v1 +clusters: +- cluster: + server: https://localhost:8080 + extensions: + - name: client.authentication.k8s.io/exec + extension: + audience: foo + other: bar + name: default +contexts: +- context: + cluster: default + user: default + namespace: bar + name: default +current-context: default +kind: Config +users: +- name: default + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + args: + - arg-1 + - arg-2 + command: foo-command + provideClusterInfo: true +` + +var fakeNode = &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{}, + }, +} + +var fakePod = corev1.Pod{ + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + }, +} + +var ( + githubMock *ghMocks.GHRepoService + ctx context.Context + mockDocker *mocks.Docker +) + +func sandboxSetup() { + ctx = context.Background() + mockDocker = &mocks.Docker{} + errCh := make(chan error) + sandboxCmdConfig.DefaultConfig.Version = "v0.19.1" + bodyStatus := make(chan container.ContainerWaitOKBody) + githubMock = &ghMocks.GHRepoService{} + sandboxCmdConfig.DefaultConfig.Image = "dummyimage" + mockDocker.OnVolumeList(ctx, filters.NewArgs(filters.KeyValuePair{Key: "name", Value: fmt.Sprintf("^%s$", docker.FlyteSandboxVolumeName)})).Return(volume.VolumeListOKBody{Volumes: []*types.Volume{}}, nil) + mockDocker.OnVolumeCreate(ctx, volume.VolumeCreateBody{Name: docker.FlyteSandboxVolumeName}).Return(types.Volume{Name: docker.FlyteSandboxVolumeName}, nil) + mockDocker.OnContainerCreateMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(container.ContainerCreateCreatedBody{ + ID: "Hello", + }, nil) + + 
mockDocker.OnContainerWaitMatch(ctx, mock.Anything, container.WaitConditionNotRunning).Return(bodyStatus, errCh) +} + +func dummyReader() io.ReadCloser { + return io.NopCloser(strings.NewReader("")) +} + +func TestStartFunc(t *testing.T) { + defaultImagePrefix := "dind" + exposedPorts, portBindings, _ := docker.GetSandboxPorts() + config := sandboxCmdConfig.DefaultConfig + config.Image = "dummyimage" + config.ImagePullOptions = docker.ImagePullOptions{ + RegistryAuth: "", + Platform: "", + } + config.Dev = true + config.DisableAgent = true + assert.Nil(t, util.SetupFlyteDir()) + assert.Nil(t, os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte", "state"), os.ModePerm)) + assert.Nil(t, ioutil.WriteFile(docker.Kubeconfig, []byte(content), os.ModePerm)) + + fakePod.SetName("flyte") + + t.Run("Successfully run demo cluster", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + mockDocker.OnVolumeList(ctx, filters.NewArgs(filters.KeyValuePair{Key: mock.Anything, Value: mock.Anything})).Return(volume.VolumeListOKBody{Volumes: []*types.Volume{}}, nil) + mockDocker.OnVolumeCreate(ctx, volume.VolumeCreateBody{Name: mock.Anything}).Return(types.Volume{}, nil) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), config, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.Nil(t, err) + }) + t.Run("Successfully exit when demo cluster exist", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: 
true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + reader, err := startSandbox(ctx, mockDocker, githubMock, strings.NewReader("n"), config, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.Nil(t, err) + assert.Nil(t, reader) + }) + t.Run("Successfully run demo cluster with source code", func(t *testing.T) { + sandboxCmdConfig.DefaultConfig.DeprecatedSource = f.UserHomeDir() + sandboxCmdConfig.DefaultConfig.Version = "" + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), config, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.Nil(t, err) + }) + t.Run("Successfully run demo cluster with abs path of source code", func(t *testing.T) { + sandboxCmdConfig.DefaultConfig.DeprecatedSource = "../" + sandboxCmdConfig.DefaultConfig.Version = "" + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnContainerStart(ctx, "Hello", 
types.ContainerStartOptions{}).Return(nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), config, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.Nil(t, err) + }) + t.Run("Successfully run demo cluster with specific version", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + sandboxCmdConfig.DefaultConfig.Image = "" + tag := "v0.15.0" + githubMock.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&github.RepositoryRelease{ + TagName: &tag, + }, nil, nil) + + githubMock.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("dummySha", nil, nil) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), config, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.Nil(t, err) + }) + t.Run("Failed run demo cluster with wrong version", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + sandboxCmdConfig.DefaultConfig.Image = "" + githubMock.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, 
mock.Anything).Return(nil, nil, fmt.Errorf("non-existent-tag")) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), sandboxCmdConfig.DefaultConfig, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.NotNil(t, err) + assert.Equal(t, "non-existent-tag", err.Error()) + }) + t.Run("Error in pulling image", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), fmt.Errorf("failed to pull")) + sandboxCmdConfig.DefaultConfig.Image = "" + tag := "v0.15.0" + githubMock.OnGetReleaseByTagMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&github.RepositoryRelease{ + TagName: &tag, + }, nil, nil) + + githubMock.OnGetCommitSHA1Match(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return("dummySha", nil, nil) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), sandboxCmdConfig.DefaultConfig, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.NotNil(t, err) + assert.Equal(t, "failed to pull", err.Error()) + }) + t.Run("Error in removing existing cluster", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(fmt.Errorf("failed to remove container")) + _, err := startSandbox(ctx, mockDocker, githubMock, strings.NewReader("y"), sandboxCmdConfig.DefaultConfig, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, 
util.SandBoxConsolePort) + assert.NotNil(t, err) + assert.Equal(t, "failed to remove container", err.Error()) + }) + t.Run("Error in start container", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(fmt.Errorf("failed to run container")) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), sandboxCmdConfig.DefaultConfig, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.NotNil(t, err) + assert.Equal(t, "failed to run container", err.Error()) + }) + t.Run("Error in reading logs", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, fmt.Errorf("failed to get container logs")) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), sandboxCmdConfig.DefaultConfig, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.NotNil(t, err) + assert.Equal(t, "failed to get container logs", err.Error()) + }) + t.Run("Error in list container", func(t *testing.T) { + sandboxSetup() + mockDocker.OnContainerListMatch(mock.Anything, mock.Anything).Return([]types.Container{}, fmt.Errorf("failed to list containers")) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + 
mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + _, err := startSandbox(ctx, mockDocker, githubMock, dummyReader(), config, sandboxImageName, defaultImagePrefix, exposedPorts, portBindings, util.SandBoxConsolePort) + assert.NotNil(t, err) + assert.Equal(t, "failed to list containers", err.Error()) + }) + t.Run("Successfully run demo cluster command", func(t *testing.T) { + // mockOutStream := new(io.Writer) + // cmdCtx := cmdCore.NewCommandContext(admin.InitializeMockClientset(), *mockOutStream) + client := testclient.NewSimpleClientset() + k8s.Client = client + _, err := client.CoreV1().Pods("flyte").Create(ctx, &fakePod, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + fakeNode.SetName("master") + _, err = client.CoreV1().Nodes().Create(ctx, fakeNode, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + sandboxSetup() + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + mockDocker.OnImagePullMatch(mock.Anything, mock.Anything, mock.Anything).Return(dummyReader(), nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + stringReader := strings.NewReader(docker.SuccessMessage) + reader := ioutil.NopCloser(stringReader) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(reader, nil) + mockK8sContextMgr := &k8sMocks.ContextOps{} + docker.Client = mockDocker + sandboxCmdConfig.DefaultConfig.DeprecatedSource = "" + sandboxCmdConfig.DefaultConfig.Version = "" + k8s.ContextMgr = mockK8sContextMgr + ghutil.Client = githubMock + mockK8sContextMgr.OnCheckConfig().Return(nil) + mockK8sContextMgr.OnCopyContextMatch(mock.Anything, 
mock.Anything, mock.Anything, mock.Anything).Return(nil) + err = StartSandboxCluster(context.Background(), []string{}, config) + assert.Nil(t, err) + }) + t.Run("Error in running demo cluster command", func(t *testing.T) { + // mockOutStream := new(io.Writer) + // cmdCtx := cmdCore.NewCommandContext(admin.InitializeMockClientset(), *mockOutStream) + sandboxSetup() + docker.Client = mockDocker + mockDocker.OnContainerListMatch(mock.Anything, mock.Anything).Return([]types.Container{}, fmt.Errorf("failed to list containers")) + mockDocker.OnImagePullMatch(ctx, mock.Anything, types.ImagePullOptions{}).Return(dummyReader(), nil) + mockDocker.OnContainerStart(ctx, "Hello", types.ContainerStartOptions{}).Return(nil) + mockDocker.OnContainerLogsMatch(ctx, mock.Anything, types.ContainerLogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: true, + Follow: true, + }).Return(nil, nil) + err := StartSandboxCluster(context.Background(), []string{}, config) + assert.NotNil(t, err) + err = StartDemoCluster(context.Background(), []string{}, config) + assert.NotNil(t, err) + }) +} + +func TestMonitorFlyteDeployment(t *testing.T) { + t.Run("Monitor k8s deployment fail because of storage", func(t *testing.T) { + ctx := context.Background() + client := testclient.NewSimpleClientset() + k8s.Client = client + fakePod.SetName("flyte") + fakePod.SetName("flyte") + + _, err := client.CoreV1().Pods("flyte").Create(ctx, &fakePod, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + fakeNode.SetName("master") + fakeNode.Spec.Taints = append(fakeNode.Spec.Taints, corev1.Taint{ + Effect: "NoSchedule", + Key: "node.kubernetes.io/disk-pressure", + }) + _, err = client.CoreV1().Nodes().Create(ctx, fakeNode, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + + err = WatchFlyteDeployment(ctx, client.CoreV1()) + assert.NotNil(t, err) + }) + + t.Run("Monitor k8s deployment success", func(t *testing.T) { + ctx := context.Background() + client := testclient.NewSimpleClientset() 
+ k8s.Client = client + fakePod.SetName("flyte") + fakePod.SetName("flyte") + + _, err := client.CoreV1().Pods("flyte").Create(ctx, &fakePod, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + fakeNode.SetName("master") + fakeNode.Spec.Taints = []corev1.Taint{} + _, err = client.CoreV1().Nodes().Create(ctx, fakeNode, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + + err = WatchFlyteDeployment(ctx, client.CoreV1()) + assert.Nil(t, err) + }) +} + +func TestGetFlyteDeploymentCount(t *testing.T) { + ctx := context.Background() + client := testclient.NewSimpleClientset() + c, err := getFlyteDeployment(ctx, client.CoreV1()) + assert.Nil(t, err) + assert.Equal(t, 0, len(c.Items)) +} + +func TestGetNodeTaintStatus(t *testing.T) { + t.Run("Check node taint with success", func(t *testing.T) { + ctx := context.Background() + client := testclient.NewSimpleClientset() + fakeNode.SetName("master") + _, err := client.CoreV1().Nodes().Create(ctx, fakeNode, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + c, err := isNodeTainted(ctx, client.CoreV1()) + assert.Nil(t, err) + assert.Equal(t, false, c) + }) + t.Run("Check node taint with fail", func(t *testing.T) { + ctx := context.Background() + client := testclient.NewSimpleClientset() + fakeNode.SetName("master") + _, err := client.CoreV1().Nodes().Create(ctx, fakeNode, v1.CreateOptions{}) + if err != nil { + t.Error(err) + } + node, err := client.CoreV1().Nodes().Get(ctx, "master", v1.GetOptions{}) + if err != nil { + t.Error(err) + } + node.Spec.Taints = append(node.Spec.Taints, corev1.Taint{ + Effect: taintEffect, + Key: diskPressureTaint, + }) + _, err = client.CoreV1().Nodes().Update(ctx, node, v1.UpdateOptions{}) + if err != nil { + t.Error(err) + } + c, err := isNodeTainted(ctx, client.CoreV1()) + assert.Nil(t, err) + assert.Equal(t, true, c) + }) +} diff --git a/flytectl/pkg/sandbox/status.go b/flytectl/pkg/sandbox/status.go new file mode 100644 index 0000000000..dc965ab138 --- /dev/null +++ 
b/flytectl/pkg/sandbox/status.go @@ -0,0 +1,22 @@ +package sandbox + +import ( + "context" + "fmt" + + "github.com/enescakir/emoji" + "github.com/flyteorg/flytectl/pkg/docker" +) + +func PrintStatus(ctx context.Context, cli docker.Docker) error { + c, err := docker.GetSandbox(ctx, cli) + if err != nil { + return err + } + if c == nil { + fmt.Printf("%v no Sandbox found \n", emoji.StopSign) + return nil + } + fmt.Printf("Flyte local sandbox container image [%s] with status [%s] is in state [%s]", c.Image, c.Status, c.State) + return nil +} diff --git a/flytectl/pkg/sandbox/status_test.go b/flytectl/pkg/sandbox/status_test.go new file mode 100644 index 0000000000..9d3e847b70 --- /dev/null +++ b/flytectl/pkg/sandbox/status_test.go @@ -0,0 +1,37 @@ +package sandbox + +import ( + "testing" + + "github.com/flyteorg/flytectl/cmd/testutils" + + "github.com/docker/docker/api/types" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/stretchr/testify/assert" +) + +func TestSandboxStatus(t *testing.T) { + t.Run("Sandbox status with zero result", func(t *testing.T) { + mockDocker := &mocks.Docker{} + s := testutils.Setup() + mockDocker.OnContainerList(s.Ctx, types.ContainerListOptions{All: true}).Return([]types.Container{}, nil) + err := PrintStatus(s.Ctx, mockDocker) + assert.Nil(t, err) + }) + t.Run("Sandbox status with running sandbox", func(t *testing.T) { + s := testutils.Setup() + ctx := s.Ctx + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return([]types.Container{ + { + ID: docker.FlyteSandboxClusterName, + Names: []string{ + docker.FlyteSandboxClusterName, + }, + }, + }, nil) + err := PrintStatus(ctx, mockDocker) + assert.Nil(t, err) + }) +} diff --git a/flytectl/pkg/sandbox/teardown.go b/flytectl/pkg/sandbox/teardown.go new file mode 100644 index 0000000000..231a4eb309 --- /dev/null +++ b/flytectl/pkg/sandbox/teardown.go @@ -0,0 +1,49 @@ +package sandbox + 
+import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/enescakir/emoji" + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + "github.com/flyteorg/flytectl/pkg/configutil" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/k8s" + "github.com/flyteorg/flytectl/pkg/util" +) + +func Teardown(ctx context.Context, cli docker.Docker, teardownFlags *sandboxCmdConfig.TeardownFlags) error { + c, err := docker.GetSandbox(ctx, cli) + if err != nil { + return err + } + if c != nil { + if err := cli.ContainerRemove(context.Background(), c.ID, types.ContainerRemoveOptions{ + Force: true, + }); err != nil { + return err + } + } + if err := configutil.ConfigCleanup(); err != nil { + fmt.Printf("Config cleanup failed. Which Failed due to %v\n", err) + } + if err := removeSandboxKubeContext(); err != nil { + fmt.Printf("Kubecontext cleanup failed. Which Failed due to %v\n", err) + } + // Teardown volume if option is specified + if teardownFlags.Volume { + if err := cli.VolumeRemove(ctx, docker.FlyteSandboxVolumeName, true); err != nil { + fmt.Printf("Volume cleanup failed. 
Which Failed due to %v\n", err) + } + } + + fmt.Printf("%v %v Sandbox cluster is removed successfully.\n", emoji.Broom, emoji.Broom) + util.PrintSandboxTeardownMessage(util.SandBoxConsolePort, docker.SandboxKubeconfig) + return nil +} + +func removeSandboxKubeContext() error { + k8sCtxMgr := k8s.NewK8sContextManager() + return k8sCtxMgr.RemoveContext(sandboxContextName) +} diff --git a/flytectl/pkg/sandbox/teardown_test.go b/flytectl/pkg/sandbox/teardown_test.go new file mode 100644 index 0000000000..3f3702c9ad --- /dev/null +++ b/flytectl/pkg/sandbox/teardown_test.go @@ -0,0 +1,49 @@ +package sandbox + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/api/types" + sandboxCmdConfig "github.com/flyteorg/flytectl/cmd/config/subcommand/sandbox" + "github.com/flyteorg/flytectl/pkg/docker" + "github.com/flyteorg/flytectl/pkg/docker/mocks" + "github.com/flyteorg/flytectl/pkg/k8s" + k8sMocks "github.com/flyteorg/flytectl/pkg/k8s/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestTearDownFunc(t *testing.T) { + var containers []types.Container + container1 := types.Container{ + ID: "FlyteSandboxClusterName", + Names: []string{ + docker.FlyteSandboxClusterName, + }, + } + containers = append(containers, container1) + ctx := context.Background() + + mockDocker := &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(fmt.Errorf("err")) + err := Teardown(ctx, mockDocker, sandboxCmdConfig.DefaultTeardownFlags) + assert.NotNil(t, err) + + mockDocker = &mocks.Docker{} + mockDocker.OnContainerList(ctx, types.ContainerListOptions{All: true}).Return(nil, fmt.Errorf("err")) + err = Teardown(ctx, mockDocker, sandboxCmdConfig.DefaultTeardownFlags) + assert.NotNil(t, err) + + mockDocker = &mocks.Docker{} + mockDocker.OnContainerList(ctx, 
types.ContainerListOptions{All: true}).Return(containers, nil) + mockDocker.OnContainerRemove(ctx, mock.Anything, types.ContainerRemoveOptions{Force: true}).Return(nil) + mockK8sContextMgr := &k8sMocks.ContextOps{} + mockK8sContextMgr.OnRemoveContext(mock.Anything).Return(nil) + k8s.ContextMgr = mockK8sContextMgr + err = Teardown(ctx, mockDocker, sandboxCmdConfig.DefaultTeardownFlags) + assert.Nil(t, err) + +} diff --git a/flytectl/pkg/util/util.go b/flytectl/pkg/util/util.go new file mode 100644 index 0000000000..1eaa28ea4a --- /dev/null +++ b/flytectl/pkg/util/util.go @@ -0,0 +1,152 @@ +package util + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/flyteorg/flytectl/pkg/configutil" + "github.com/flyteorg/flytectl/pkg/docker" + + "github.com/enescakir/emoji" + hversion "github.com/hashicorp/go-version" +) + +const ( + ProgressSuccessMessage = "Flyte is ready! Flyte UI is available at" + ProgressSuccessMessagePending = "Flyte would be ready after this! 
Flyte UI would be available at" + SandBoxConsolePort = 30081 + DemoConsolePort = 30080 +) + +var Ext string + +// WriteIntoFile will write content in a file +func WriteIntoFile(data []byte, file string) error { + err := ioutil.WriteFile(file, data, os.ModePerm) + if err != nil { + return err + } + return nil +} + +func CreatePathAndFile(pathToConfig string) error { + p, err := filepath.Abs(pathToConfig) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil { + return err + } + + // Created a empty file with right permission + if _, err := os.Stat(p); err != nil { + if os.IsNotExist(err) { + if err := os.WriteFile(p, []byte(""), os.ModePerm); err != nil { + return err + } + } + } + return nil +} + +// SetupFlyteDir will create .flyte dir if not exist +func SetupFlyteDir() error { + if err := os.MkdirAll(docker.FlyteSandboxConfigDir, os.ModePerm); err != nil { + return err + } + + // Created a empty file with right permission + if _, err := os.Stat(docker.Kubeconfig); err != nil { + if os.IsNotExist(err) { + if err := os.WriteFile(docker.Kubeconfig, []byte(""), os.ModePerm); err != nil { + return err + } + } + } + + return nil +} + +// PrintDemoStartMessage will print demo start success message +func PrintDemoStartMessage(flyteConsolePort int, kubeconfigLocation string, dryRun bool) { + kubeconfig := strings.Join([]string{ + "$KUBECONFIG", + kubeconfigLocation, + }, ":") + + var successMsg string + if dryRun { + successMsg = fmt.Sprintf("%v http://localhost:%v/console", ProgressSuccessMessagePending, flyteConsolePort) + } else { + successMsg = fmt.Sprintf("%v http://localhost:%v/console", ProgressSuccessMessage, flyteConsolePort) + } + fmt.Printf("%v %v %v %v %v \n", emoji.ManTechnologist, successMsg, emoji.Rocket, emoji.Rocket, emoji.PartyPopper) + fmt.Printf("%v Run the following command to export demo environment variables for accessing flytectl\n", emoji.Sparkle) + fmt.Printf(" export FLYTECTL_CONFIG=%v \n", 
configutil.FlytectlConfig) + if dryRun { + fmt.Printf("%v Run the following command to export kubeconfig variables for accessing flyte pods locally\n", emoji.Sparkle) + fmt.Printf(" export KUBECONFIG=%v \n", kubeconfig) + } + fmt.Printf("%s Flyte sandbox ships with a Docker registry. Tag and push custom workflow images to localhost:30000\n", emoji.Whale) + fmt.Printf("%s The Minio API is hosted on localhost:30002. Use http://localhost:30080/minio/login for Minio console, default credentials - username: minio, password: miniostorage\n", emoji.OpenFileFolder) +} + +// PrintSandboxStartMessage will print sandbox start success message +func PrintSandboxStartMessage(flyteConsolePort int, kubeconfigLocation string, dryRun bool) { + kubeconfig := strings.Join([]string{ + "$KUBECONFIG", + kubeconfigLocation, + }, ":") + + var successMsg string + if dryRun { + successMsg = fmt.Sprintf("%v http://localhost:%v/console", ProgressSuccessMessagePending, flyteConsolePort) + } else { + successMsg = fmt.Sprintf("%v http://localhost:%v/console", ProgressSuccessMessage, flyteConsolePort) + } + fmt.Printf("%v %v %v %v %v \n", emoji.ManTechnologist, successMsg, emoji.Rocket, emoji.Rocket, emoji.PartyPopper) + fmt.Printf("%v Run the following command to export sandbox environment variables for accessing flytectl\n", emoji.Sparkle) + fmt.Printf(" export FLYTECTL_CONFIG=%v \n", configutil.FlytectlConfig) + if dryRun { + fmt.Printf("%v Run the following command to export kubeconfig variables for accessing flyte pods locally\n", emoji.Sparkle) + fmt.Printf(" export KUBECONFIG=%v \n", kubeconfig) + } +} + +// PrintSandboxTeardownMessage will print sandbox teardown success message +func PrintSandboxTeardownMessage(flyteConsolePort int, kubeconfigLocation string) { + fmt.Printf("%v Run the following command to unset sandbox environment variables for accessing flytectl\n", emoji.Sparkle) + fmt.Printf(" unset FLYTECTL_CONFIG \n") +} + +// SendRequest will create request and return the response 
+func SendRequest(method, url string, option io.Reader) (*http.Response, error) { + client := &http.Client{} + req, _ := http.NewRequest(method, url, option) + response, err := client.Do(req) + if err != nil { + return nil, err + } + if response.StatusCode != 200 { + return nil, fmt.Errorf("someting goes wrong while sending request to %s. Got status code %v", url, response.StatusCode) + } + return response, nil +} + +// IsVersionGreaterThan check version if it's greater then other +func IsVersionGreaterThan(version1, version2 string) (bool, error) { + semanticVersion1, err := hversion.NewVersion(version1) + if err != nil { + return false, err + } + semanticVersion2, err := hversion.NewVersion(version2) + if err != nil { + return false, err + } + return semanticVersion1.GreaterThan(semanticVersion2), nil +} diff --git a/flytectl/pkg/util/util_test.go b/flytectl/pkg/util/util_test.go new file mode 100644 index 0000000000..dd9de2a1c1 --- /dev/null +++ b/flytectl/pkg/util/util_test.go @@ -0,0 +1,104 @@ +package util + +import ( + "os" + "path/filepath" + "testing" + + "github.com/flyteorg/flytectl/pkg/docker" + + "github.com/stretchr/testify/assert" +) + +const testVersion = "v0.1.20" + +func TestWriteIntoFile(t *testing.T) { + t.Run("Successfully write into a file", func(t *testing.T) { + err := WriteIntoFile([]byte(""), "version.yaml") + assert.Nil(t, err) + }) + t.Run("Error in writing file", func(t *testing.T) { + err := WriteIntoFile([]byte(""), "version.yaml") + assert.Nil(t, err) + }) +} + +func TestSetupFlyteDir(t *testing.T) { + assert.Nil(t, SetupFlyteDir()) +} + +func TestPrintSandboxStartMessage(t *testing.T) { + t.Run("Print Sandbox Message", func(t *testing.T) { + PrintSandboxStartMessage(SandBoxConsolePort, docker.SandboxKubeconfig, false) + }) +} + +func TestPrintSandboxTeardownMessage(t *testing.T) { + t.Run("Print Sandbox Message", func(t *testing.T) { + PrintSandboxTeardownMessage(SandBoxConsolePort, docker.SandboxKubeconfig) + }) +} + +func 
TestSendRequest(t *testing.T) { + t.Run("Successful get request", func(t *testing.T) { + response, err := SendRequest("GET", "https://github.com", nil) + assert.Nil(t, err) + assert.NotNil(t, response) + }) + t.Run("Successful get request failed", func(t *testing.T) { + response, err := SendRequest("GET", "htp://github.com", nil) + assert.NotNil(t, err) + assert.Nil(t, response) + }) + t.Run("Successful get request failed", func(t *testing.T) { + response, err := SendRequest("GET", "https://github.com/evalsocket/flyte/archive/refs/tags/source-code.zip", nil) + assert.NotNil(t, err) + assert.Nil(t, response) + }) +} + +func TestIsVersionGreaterThan(t *testing.T) { + t.Run("Compare FlyteCTL version when upgrade available", func(t *testing.T) { + _, err := IsVersionGreaterThan("v1.1.21", testVersion) + assert.Nil(t, err) + }) + t.Run("Compare FlyteCTL version greater then", func(t *testing.T) { + ok, err := IsVersionGreaterThan("v1.1.21", testVersion) + assert.Nil(t, err) + assert.Equal(t, true, ok) + }) + t.Run("Compare FlyteCTL version greater then for equal value", func(t *testing.T) { + ok, err := IsVersionGreaterThan(testVersion, testVersion) + assert.Nil(t, err) + assert.Equal(t, false, ok) + }) + t.Run("Compare FlyteCTL version smaller then", func(t *testing.T) { + ok, err := IsVersionGreaterThan("v0.1.19", testVersion) + assert.Nil(t, err) + assert.Equal(t, false, ok) + }) + t.Run("Compare FlyteCTL version", func(t *testing.T) { + _, err := IsVersionGreaterThan(testVersion, testVersion) + assert.Nil(t, err) + }) + t.Run("Error in compare FlyteCTL version", func(t *testing.T) { + _, err := IsVersionGreaterThan("vvvvvvvv", testVersion) + assert.NotNil(t, err) + }) + t.Run("Error in compare FlyteCTL version", func(t *testing.T) { + _, err := IsVersionGreaterThan(testVersion, "vvvvvvvv") + assert.NotNil(t, err) + }) +} + +func TestCreatePathAndFile(t *testing.T) { + dir, err := os.MkdirTemp("", "flytectl") + assert.NoError(t, err) + defer os.RemoveAll(dir) + + 
testFile := filepath.Join(dir, "testfile.yaml") + err = CreatePathAndFile(testFile) + assert.NoError(t, err) + _, err = os.Stat(testFile) + assert.NoError(t, err) +} diff --git a/flytectl/pkg/util/version.yaml b/flytectl/pkg/util/version.yaml new file mode 100755 index 0000000000..e69de29bb2 diff --git a/flytectl/pkg/visualize/graphviz.go b/flytectl/pkg/visualize/graphviz.go new file mode 100644 index 0000000000..d088a2515b --- /dev/null +++ b/flytectl/pkg/visualize/graphviz.go @@ -0,0 +1,383 @@ +package visualize + +import ( + "fmt" + "strings" + + "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" + + graphviz "github.com/awalterschulze/gographviz" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +const ( + // node identifiers + StartNode string = "start-node" + EndNode string = "end-node" + + // subgraph attributes + SubgraphPrefix string = "cluster_" + + // shape attributes + DoubleCircleShape string = "doublecircle" + BoxShape string = "box" + DiamondShape string = "diamond" + ShapeType string = "shape" + + // color attributes + ColorAttr string = "color" + Red string = "red" + Green string = "green" + + // structural attributes + LabelAttr string = "label" + LHeadAttr string = "lhead" + LTailAttr string = "ltail" + + // conditional + ElseFail string = "orElse - Fail" + Else string = "orElse" +) + +func operandToString(op *core.Operand) string { + if op.GetPrimitive() != nil { + l, err := coreutils.ExtractFromLiteral(&core.Literal{Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: op.GetPrimitive(), + }, + }, + }}) + if err != nil { + return err.Error() + } + return fmt.Sprintf("%v", l) + } + return op.GetVar() +} + +func comparisonToString(expr *core.ComparisonExpression) string { + return fmt.Sprintf("%s %s %s", operandToString(expr.LeftValue), expr.Operator.String(), operandToString(expr.RightValue)) +} + +func conjunctionToString(expr *core.ConjunctionExpression) string { + return 
fmt.Sprintf("(%s) %s (%s)", booleanExprToString(expr.LeftExpression), expr.Operator.String(), booleanExprToString(expr.RightExpression)) +} + +func booleanExprToString(expr *core.BooleanExpression) string { + if expr.GetConjunction() != nil { + return conjunctionToString(expr.GetConjunction()) + } + return comparisonToString(expr.GetComparison()) +} + +func constructStartNode(parentGraph string, n string, graph Graphvizer) (*graphviz.Node, error) { + attrs := map[string]string{ShapeType: DoubleCircleShape, ColorAttr: Green} + attrs[LabelAttr] = "start" + err := graph.AddNode(parentGraph, n, attrs) + return graph.GetNode(n), err +} + +func constructEndNode(parentGraph string, n string, graph Graphvizer) (*graphviz.Node, error) { + attrs := map[string]string{ShapeType: DoubleCircleShape, ColorAttr: Red} + attrs[LabelAttr] = "end" + err := graph.AddNode(parentGraph, n, attrs) + return graph.GetNode(n), err +} + +func constructTaskNode(parentGraph string, name string, graph Graphvizer, n *core.Node, t *core.CompiledTask) (*graphviz.Node, error) { + attrs := map[string]string{ShapeType: BoxShape} + if n.Metadata != nil && n.Metadata.Name != "" { + v := strings.LastIndexAny(n.Metadata.Name, ".") + attrs[LabelAttr] = fmt.Sprintf("\"%s [%s]\"", n.Metadata.Name[v+1:], t.Template.Type) + } + tName := strings.ReplaceAll(name, "-", "_") + err := graph.AddNode(parentGraph, tName, attrs) + return graph.GetNode(tName), err +} + +func constructErrorNode(parentGraph string, name string, graph Graphvizer, m string) (*graphviz.Node, error) { + attrs := map[string]string{ShapeType: BoxShape, ColorAttr: Red, LabelAttr: fmt.Sprintf("\"%s\"", m)} + eName := strings.ReplaceAll(name, "-", "_") + err := graph.AddNode(parentGraph, eName, attrs) + return graph.GetNode(eName), err +} + +func constructBranchConditionNode(parentGraph string, name string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) { + attrs := map[string]string{ShapeType: DiamondShape} + if n.Metadata != nil && 
n.Metadata.Name != "" { + attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", n.Metadata.Name) + } + cName := strings.ReplaceAll(name, "-", "_") + err := graph.AddNode(parentGraph, cName, attrs) + return graph.GetNode(cName), err +} + +func getName(prefix, id string) string { + if prefix != "" { + return prefix + "_" + id + } + return id +} + +type graphBuilder struct { + // Mutated as graph is built + graphNodes map[string]*graphviz.Node + // Mutated as graph is built. lookup table for all graphviz compiled edges. + graphEdges map[string]*graphviz.Edge + // lookup table for all graphviz compiled subgraphs + subWf map[string]*core.CompiledWorkflow + // a lookup table for all tasks in the graph + tasks map[string]*core.CompiledTask + // a lookup for all node clusters. This is to remap the edges to the cluster itself (instead of the node) + // this is useful in the case of branchNodes and subworkflow nodes + nodeClusters map[string]string +} + +func (gb *graphBuilder) addBranchSubNodeEdge(graph Graphvizer, parentNode, n *graphviz.Node, label string) error { + edgeName := fmt.Sprintf("%s-%s", parentNode.Name, n.Name) + if _, ok := gb.graphEdges[edgeName]; !ok { + attrs := map[string]string{} + if c, ok := gb.nodeClusters[n.Name]; ok { + attrs[LHeadAttr] = fmt.Sprintf("\"%s\"", c) + } + attrs[LabelAttr] = fmt.Sprintf("\"%s\"", label) + err := graph.AddEdge(parentNode.Name, n.Name, true, attrs) + if err != nil { + return err + } + gb.graphEdges[edgeName] = graph.GetEdge(parentNode.Name, n.Name) + } + return nil +} + +func (gb *graphBuilder) constructBranchNode(parentGraph string, prefix string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) { + parentBranchNode, err := constructBranchConditionNode(parentGraph, getName(prefix, n.Id), graph, n) + if err != nil { + return nil, err + } + gb.graphNodes[n.Id] = parentBranchNode + + if n.GetBranchNode().GetIfElse() == nil { + return parentBranchNode, nil + } + + subNode, err := gb.constructNode(parentGraph, prefix, graph, 
n.GetBranchNode().GetIfElse().Case.ThenNode) + if err != nil { + return nil, err + } + if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(n.GetBranchNode().GetIfElse().Case.Condition)); err != nil { + return nil, err + } + + if n.GetBranchNode().GetIfElse().GetError() != nil { + name := fmt.Sprintf("%s-error", parentBranchNode.Name) + subNode, err := constructErrorNode(prefix, name, graph, n.GetBranchNode().GetIfElse().GetError().Message) + if err != nil { + return nil, err + } + gb.graphNodes[name] = subNode + if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, ElseFail); err != nil { + return nil, err + } + } else { + subNode, err := gb.constructNode(parentGraph, prefix, graph, n.GetBranchNode().GetIfElse().GetElseNode()) + if err != nil { + return nil, err + } + if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, Else); err != nil { + return nil, err + } + } + + if n.GetBranchNode().GetIfElse().GetOther() != nil { + for _, c := range n.GetBranchNode().GetIfElse().GetOther() { + subNode, err := gb.constructNode(parentGraph, prefix, graph, c.ThenNode) + if err != nil { + return nil, err + } + if err := gb.addBranchSubNodeEdge(graph, parentBranchNode, subNode, booleanExprToString(c.Condition)); err != nil { + return nil, err + } + } + } + return parentBranchNode, nil +} + +func (gb *graphBuilder) constructNode(parentGraphName string, prefix string, graph Graphvizer, n *core.Node) (*graphviz.Node, error) { + name := getName(prefix, n.Id) + var err error + var gn *graphviz.Node + + if n.Id == StartNode { + gn, err = constructStartNode(parentGraphName, strings.ReplaceAll(name, "-", "_"), graph) + gb.nodeClusters[name] = parentGraphName + } else if n.Id == EndNode { + gn, err = constructEndNode(parentGraphName, strings.ReplaceAll(name, "-", "_"), graph) + gb.nodeClusters[name] = parentGraphName + } else { + switch n.Target.(type) { + case *core.Node_TaskNode: + tID := 
n.GetTaskNode().GetReferenceId().String() + t, ok := gb.tasks[tID] + if !ok { + return nil, fmt.Errorf("failed to find task [%s] in closure", tID) + } + gn, err = constructTaskNode(parentGraphName, name, graph, n, t) + if err != nil { + return nil, err + } + gb.nodeClusters[name] = parentGraphName + case *core.Node_BranchNode: + sanitizedName := strings.ReplaceAll(n.Metadata.Name, "-", "_") + branchSubGraphName := SubgraphPrefix + sanitizedName + err := graph.AddSubGraph(parentGraphName, branchSubGraphName, map[string]string{LabelAttr: sanitizedName}) + if err != nil { + return nil, err + } + gn, err = gb.constructBranchNode(branchSubGraphName, prefix, graph, n) + if err != nil { + return nil, err + } + gb.nodeClusters[name] = branchSubGraphName + case *core.Node_WorkflowNode: + if n.GetWorkflowNode().GetLaunchplanRef() != nil { + attrs := map[string]string{} + err := graph.AddNode(parentGraphName, name, attrs) + if err != nil { + return nil, err + } + } else { + sanitizedName := strings.ReplaceAll(name, "-", "_") + subGraphName := SubgraphPrefix + sanitizedName + err := graph.AddSubGraph(parentGraphName, subGraphName, map[string]string{LabelAttr: sanitizedName}) + if err != nil { + return nil, err + } + subGB := graphBuilderFromParent(gb) + swf, ok := gb.subWf[n.GetWorkflowNode().GetSubWorkflowRef().String()] + if !ok { + return nil, fmt.Errorf("subworkfow [%s] not found", n.GetWorkflowNode().GetSubWorkflowRef().String()) + } + if err := subGB.constructGraph(subGraphName, name, graph, swf); err != nil { + return nil, err + } + gn = subGB.graphNodes[StartNode] + gb.nodeClusters[gn.Name] = subGraphName + } + } + } + if err != nil { + return nil, err + } + gb.graphNodes[n.Id] = gn + return gn, nil +} + +func (gb *graphBuilder) addEdge(fromNodeName, toNodeName string, graph Graphvizer) error { + toNode, toOk := gb.graphNodes[toNodeName] + fromNode, fromOk := gb.graphNodes[fromNodeName] + if !toOk || !fromOk { + return fmt.Errorf("nodes[%s] -> [%s] referenced before 
creation", fromNodeName, toNodeName) + } + if !graph.DoesEdgeExist(fromNode.Name, toNode.Name) { + attrs := map[string]string{} + // Now lets check that the toNode or the fromNode is a cluster. If so then following this thread, + // https://stackoverflow.com/questions/2012036/graphviz-how-to-connect-subgraphs, we will connect the cluster + if c, ok := gb.nodeClusters[fromNode.Name]; ok { + attrs[LTailAttr] = fmt.Sprintf("\"%s\"", c) + } + if c, ok := gb.nodeClusters[toNode.Name]; ok { + attrs[LHeadAttr] = fmt.Sprintf("\"%s\"", c) + } + err := graph.AddEdge(fromNode.Name, toNode.Name, true, attrs) + if err != nil { + return err + } + } + return nil +} + +func (gb *graphBuilder) constructGraph(parentGraphName string, prefix string, graph Graphvizer, w *core.CompiledWorkflow) error { + if w == nil || w.Template == nil { + return nil + } + for _, n := range w.Template.Nodes { + if _, err := gb.constructNode(parentGraphName, prefix, graph, n); err != nil { + return err + } + } + + for name := range gb.graphNodes { + upstreamNodes := w.Connections.Upstream[name] + downstreamNodes := w.Connections.Downstream[name] + if downstreamNodes != nil { + for _, n := range downstreamNodes.Ids { + if err := gb.addEdge(name, n, graph); err != nil { + return err + } + } + } + if upstreamNodes != nil { + for _, n := range upstreamNodes.Ids { + if err := gb.addEdge(n, name, graph); err != nil { + return err + } + } + } + } + return nil +} + +func (gb *graphBuilder) CompiledWorkflowClosureToGraph(w *core.CompiledWorkflowClosure) (FlyteGraph, error) { + dotGraph := FlyteGraph{graphviz.NewGraph()} + _ = dotGraph.SetDir(true) + _ = dotGraph.SetStrict(true) + + tLookup := make(map[string]*core.CompiledTask) + for _, t := range w.Tasks { + if t.Template == nil || t.Template.Id == nil { + return FlyteGraph{}, fmt.Errorf("no template found in the workflow task %v", t) + } + tLookup[t.Template.Id.String()] = t + } + gb.tasks = tLookup + wLookup := make(map[string]*core.CompiledWorkflow) + for _, 
swf := range w.SubWorkflows { + if swf.Template == nil || swf.Template.Id == nil { + return FlyteGraph{}, fmt.Errorf("no template found in the sub workflow %v", swf) + } + wLookup[swf.Template.Id.String()] = swf + } + gb.subWf = wLookup + + return dotGraph, gb.constructGraph("", "", dotGraph, w.Primary) +} + +func newGraphBuilder() *graphBuilder { + return &graphBuilder{ + graphNodes: make(map[string]*graphviz.Node), + graphEdges: make(map[string]*graphviz.Edge), + nodeClusters: make(map[string]string), + } +} + +func graphBuilderFromParent(gb *graphBuilder) *graphBuilder { + newGB := newGraphBuilder() + newGB.subWf = gb.subWf + newGB.tasks = gb.tasks + return newGB +} + +// RenderWorkflow Renders the workflow graph on the console +func RenderWorkflow(w *core.CompiledWorkflowClosure) (string, error) { + if w == nil { + return "", fmt.Errorf("empty workflow closure") + } + gb := newGraphBuilder() + graph, err := gb.CompiledWorkflowClosureToGraph(w) + if err != nil { + return "", err + } + return graph.String(), nil +} diff --git a/flytectl/pkg/visualize/graphviz_test.go b/flytectl/pkg/visualize/graphviz_test.go new file mode 100644 index 0000000000..710ce01f1d --- /dev/null +++ b/flytectl/pkg/visualize/graphviz_test.go @@ -0,0 +1,470 @@ +package visualize + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flytectl/pkg/visualize/mocks" + + graphviz "github.com/awalterschulze/gographviz" + "github.com/golang/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestRenderWorkflowBranch(t *testing.T) { + // Sadly we cannot compare the output of svg, as it slightly changes. 
+ file := []string{"compiled_closure_branch_nested", "compiled_subworkflows"} + + for _, s := range file { + t.Run(s, func(t *testing.T) { + r, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.json", s)) + assert.NoError(t, err) + + i := bytes.NewReader(r) + + c := &core.CompiledWorkflowClosure{} + err = jsonpb.Unmarshal(i, c) + assert.NoError(t, err) + b, err := RenderWorkflow(c) + fmt.Println(b) + assert.NoError(t, err) + assert.NotNil(t, b) + }) + } +} + +func TestAddBranchSubNodeEdge(t *testing.T) { + attrs := map[string]string{} + attrs[LHeadAttr] = fmt.Sprintf("\"%s\"", "innerGraph") + attrs[LabelAttr] = fmt.Sprintf("\"%s\"", "label") + t.Run("Successful", func(t *testing.T) { + gb := newGraphBuilder() + gb.nodeClusters["n"] = "innerGraph" + parentNode := &graphviz.Node{Name: "parentNode", Attrs: nil} + n := &graphviz.Node{Name: "n"} + + mockGraph := &mocks.Graphvizer{} + // Verify the attributes + mockGraph.OnAddEdgeMatch(mock.Anything, mock.Anything, mock.Anything, attrs).Return(nil) + mockGraph.OnGetEdgeMatch(mock.Anything, mock.Anything).Return(&graphviz.Edge{}) + err := gb.addBranchSubNodeEdge(mockGraph, parentNode, n, "label") + assert.NoError(t, err) + }) + + t.Run("Error", func(t *testing.T) { + gb := newGraphBuilder() + gb.nodeClusters["n"] = "innerGraph" + parentNode := &graphviz.Node{Name: "parentNode", Attrs: nil} + n := &graphviz.Node{Name: "n"} + + mockGraph := &mocks.Graphvizer{} + // Verify the attributes + mockGraph.OnAddEdgeMatch(mock.Anything, mock.Anything, mock.Anything, attrs).Return(fmt.Errorf("error adding edge")) + err := gb.addBranchSubNodeEdge(mockGraph, parentNode, n, "label") + assert.NotNil(t, err) + }) +} + +func TestConstructBranchNode(t *testing.T) { + attrs := map[string]string{} + attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", "nodeMetadata") + attrs[ShapeType] = DiamondShape + t.Run("Successful", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + expectedGraphvizNode := &graphviz.Node{ + Name: 
"brancheName_id", + Attrs: map[graphviz.Attr]string{"label": fmt.Sprintf("\"[%s]\"", "nodeMetadata"), + "shape": "diamond"}, + } + // Verify the attributes + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, attrs).Return(nil) + mockGraph.OnGetNodeMatch(mock.Anything).Return(expectedGraphvizNode) + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_BranchNode{ + BranchNode: &core.BranchNode{}, + }, + } + resultBranchNode, err := gb.constructBranchNode("parentGraph", "branchName", mockGraph, flyteNode) + assert.NoError(t, err) + assert.NotNil(t, resultBranchNode) + assert.Equal(t, expectedGraphvizNode, resultBranchNode) + }) + + t.Run("Add Node Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + // Verify the attributes + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, attrs).Return(fmt.Errorf("unable to add node")) + mockGraph.OnGetNodeMatch(mock.Anything).Return(nil) + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_BranchNode{ + BranchNode: &core.BranchNode{}, + }, + } + resultBranchNode, err := gb.constructBranchNode("parentGraph", "branchName", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Nil(t, resultBranchNode) + }) + + t.Run("Add ThenNode Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + attrs := map[string]string{} + attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", "nodeMetadata") + attrs[ShapeType] = DiamondShape + + // Verify the attributes + mockGraph.OnAddNodeMatch(mock.Anything, "branchName_id", attrs).Return(nil) + mockGraph.OnAddNodeMatch(mock.Anything, "branchName_start_node", mock.Anything).Return(fmt.Errorf("unable to add node")) + mockGraph.OnGetNodeMatch(mock.Anything).Return(nil) + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_BranchNode{ + 
BranchNode: &core.BranchNode{ + IfElse: &core.IfElseBlock{ + Case: &core.IfBlock{ + Condition: &core.BooleanExpression{}, + ThenNode: &core.Node{ + Id: "start-node", + }, + }, + }, + }, + }, + } + resultBranchNode, err := gb.constructBranchNode("parentGraph", "branchName", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to add node"), err) + assert.Nil(t, resultBranchNode) + }) + + t.Run("Add Condition Node Edge Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + attrs := map[string]string{} + attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", "nodeMetadata") + attrs[ShapeType] = DiamondShape + + parentNode := &graphviz.Node{Name: "parentNode", Attrs: nil} + thenBranchStartNode := &graphviz.Node{Name: "branchName_start_node", Attrs: nil} + + mockGraph.OnAddNodeMatch(mock.Anything, "branchName_id", attrs).Return(nil) + mockGraph.OnAddNodeMatch(mock.Anything, "branchName_start_node", mock.Anything).Return(nil) + mockGraph.OnGetNodeMatch("branchName_id").Return(parentNode) + mockGraph.OnGetNodeMatch("branchName_start_node").Return(thenBranchStartNode) + mockGraph.OnAddEdgeMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("unable to add edge")) + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_BranchNode{ + BranchNode: &core.BranchNode{ + IfElse: &core.IfElseBlock{ + Case: &core.IfBlock{ + Condition: &core.BooleanExpression{ + Expr: &core.BooleanExpression_Comparison{ + Comparison: &core.ComparisonExpression{ + Operator: core.ComparisonExpression_EQ, + LeftValue: &core.Operand{ + Val: &core.Operand_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 40, + }, + }, + }, + }, + RightValue: &core.Operand{ + Val: &core.Operand_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 50, + }, + }, + }, + }, + }, + }, + }, + ThenNode: 
&core.Node{ + Id: "start-node", + }, + }, + }, + }, + }, + } + resultBranchNode, err := gb.constructBranchNode("parentGraph", "branchName", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to add edge"), err) + assert.Nil(t, resultBranchNode) + }) +} + +func TestConstructNode(t *testing.T) { + + t.Run("Start-Node", func(t *testing.T) { + attrs := map[string]string{} + attrs[LabelAttr] = "start" + attrs[ShapeType] = DoubleCircleShape + attrs[ColorAttr] = Green + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + expectedGraphvizNode := &graphviz.Node{ + Name: "start-node", + Attrs: map[graphviz.Attr]string{"label": "start", "shape": "doublecircle", "color": "green"}, + } + // Verify the attributes + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, attrs).Return(nil) + mockGraph.OnGetNodeMatch(mock.Anything).Return(expectedGraphvizNode) + flyteNode := &core.Node{ + Id: "start-node", + } + resultNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NoError(t, err) + assert.NotNil(t, resultNode) + assert.Equal(t, expectedGraphvizNode, resultNode) + }) + + t.Run("End-Node", func(t *testing.T) { + attrs := map[string]string{} + attrs[LabelAttr] = "end" + attrs[ShapeType] = DoubleCircleShape + attrs[ColorAttr] = Red + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + expectedGraphvizNode := &graphviz.Node{ + Name: "end-node", + Attrs: map[graphviz.Attr]string{"label": "end", "shape": "doublecircle", "color": "red"}, + } + // Verify the attributes + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, attrs).Return(nil) + mockGraph.OnGetNodeMatch(mock.Anything).Return(expectedGraphvizNode) + flyteNode := &core.Node{ + Id: "end-node", + } + resultNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NoError(t, err) + assert.NotNil(t, resultNode) + assert.Equal(t, expectedGraphvizNode, resultNode) + }) + + t.Run("Task-Node-Error", func(t *testing.T) { + gb := newGraphBuilder() + 
mockGraph := &mocks.Graphvizer{} + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_TaskNode{ + TaskNode: &core.TaskNode{ + Reference: &core.TaskNode_ReferenceId{ + ReferenceId: &core.Identifier{ + Project: "dummyProject", + Domain: "dummyDomain", + Name: "dummyName", + Version: "dummyVersion", + }, + }, + }, + }, + } + resultNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("failed to find task [%s] in closure", + flyteNode.GetTaskNode().GetReferenceId().String()), err) + assert.Nil(t, resultNode) + }) + + t.Run("Branch-Node-SubGraph-Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + // Verify the attributes + mockGraph.OnAddSubGraphMatch("", SubgraphPrefix+"nodeMetadata", + mock.Anything).Return(fmt.Errorf("unable to create subgraph")) + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_BranchNode{ + BranchNode: &core.BranchNode{}, + }, + } + resultBranchNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to create subgraph"), err) + assert.Nil(t, resultBranchNode) + }) + + t.Run("Branch-Node-Add-Error", func(t *testing.T) { + attrs := map[string]string{} + attrs[LabelAttr] = fmt.Sprintf("\"[%s]\"", "nodeMetadata") + attrs[ShapeType] = DiamondShape + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + + // Verify the attributes + mockGraph.OnAddSubGraphMatch(mock.Anything, SubgraphPrefix+"nodeMetadata", mock.Anything).Return(nil) + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, attrs).Return(fmt.Errorf("unable to add node")) + mockGraph.OnGetNodeMatch(mock.Anything).Return(nil) + + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_BranchNode{ + BranchNode: 
&core.BranchNode{}, + }, + } + resultBranchNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to add node"), err) + assert.Nil(t, resultBranchNode) + }) + + t.Run("Workflow-Node-Add-Error", func(t *testing.T) { + attrs := map[string]string{} + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + + // Verify the attributes + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, attrs).Return(fmt.Errorf("unable to add node")) + mockGraph.OnGetNodeMatch(mock.Anything).Return(nil) + + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_WorkflowNode{ + WorkflowNode: &core.WorkflowNode{ + Reference: &core.WorkflowNode_LaunchplanRef{ + LaunchplanRef: &core.Identifier{ + Project: "dummyProject", + Domain: "dummyDomain", + Name: "dummyName", + Version: "dummyVersion", + }, + }, + }, + }, + } + resultWorkflowNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to add node"), err) + assert.Nil(t, resultWorkflowNode) + }) + + t.Run("Workflow-Node-SubGraph-Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + // Verify the attributes + mockGraph.OnAddSubGraphMatch("", SubgraphPrefix+"id", + mock.Anything).Return(fmt.Errorf("unable to create subgraph")) + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_WorkflowNode{ + WorkflowNode: &core.WorkflowNode{}, + }, + } + resultWorkflowNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to create subgraph"), err) + assert.Nil(t, resultWorkflowNode) + }) + t.Run("Workflow-Node-Subworkflow-NotFound-Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + + // Verify the attributes + 
mockGraph.OnAddSubGraphMatch(mock.Anything, SubgraphPrefix+"id", mock.Anything).Return(nil) + + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_WorkflowNode{ + WorkflowNode: &core.WorkflowNode{ + Reference: &core.WorkflowNode_SubWorkflowRef{ + SubWorkflowRef: &core.Identifier{ + Project: "dummyProject", + Domain: "dummyDomain", + Name: "dummyName", + Version: "dummyVersion", + }, + }, + }, + }, + } + resultWorkflowNode, err := gb.constructNode("", "", mockGraph, flyteNode) + assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("subworkfow [project:\"dummyProject\" domain:\"dummyDomain\" name:\"dummyName\" version:\"dummyVersion\" ] not found"), err) + assert.Nil(t, resultWorkflowNode) + }) + + t.Run("Workflow-Node-Subworkflow-Graph-Create-Error", func(t *testing.T) { + gb := newGraphBuilder() + mockGraph := &mocks.Graphvizer{} + + // Verify the attributes + mockGraph.OnAddSubGraphMatch(mock.Anything, SubgraphPrefix+"id", mock.Anything).Return(nil) + mockGraph.OnAddNodeMatch(mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("unable to add node")) + mockGraph.OnGetNodeMatch(mock.Anything).Return(nil) + + gb.subWf = make(map[string]*core.CompiledWorkflow) + subwfNode := &core.Node{ + Id: "start-node", + } + sbwfNodes := []*core.Node{subwfNode} + + gb.subWf["project:\"dummyProject\" domain:\"dummyDomain\" name:\"dummyName\" version:\"dummyVersion\" "] = + &core.CompiledWorkflow{Template: &core.WorkflowTemplate{Nodes: sbwfNodes}} + flyteNode := &core.Node{ + Id: "id", + Metadata: &core.NodeMetadata{ + Name: "nodeMetadata", + }, + Target: &core.Node_WorkflowNode{ + WorkflowNode: &core.WorkflowNode{ + Reference: &core.WorkflowNode_SubWorkflowRef{ + SubWorkflowRef: &core.Identifier{ + Project: "dummyProject", + Domain: "dummyDomain", + Name: "dummyName", + Version: "dummyVersion", + }, + }, + }, + }, + } + resultWorkflowNode, err := gb.constructNode("", "", mockGraph, flyteNode) + 
assert.NotNil(t, err) + assert.Equal(t, fmt.Errorf("unable to add node"), err) + assert.Nil(t, resultWorkflowNode) + }) + +} diff --git a/flytectl/pkg/visualize/graphvizer.go b/flytectl/pkg/visualize/graphvizer.go new file mode 100644 index 0000000000..7fcb24ee08 --- /dev/null +++ b/flytectl/pkg/visualize/graphvizer.go @@ -0,0 +1,35 @@ +package visualize + +import graphviz "github.com/awalterschulze/gographviz" + +//go:generate mockery -all -case=underscore + +type Graphvizer interface { + AddEdge(src, dst string, directed bool, attrs map[string]string) error + AddNode(parentGraph string, name string, attrs map[string]string) error + AddSubGraph(parentGraph string, name string, attrs map[string]string) error + AddAttr(parentGraph string, field string, value string) error + SetName(name string) error + GetEdge(src, dest string) *graphviz.Edge + GetNode(key string) *graphviz.Node + DoesEdgeExist(src, dest string) bool +} + +type FlyteGraph struct { + *graphviz.Graph +} + +// GetNode given the key to the node +func (g FlyteGraph) GetNode(key string) *graphviz.Node { + return g.Nodes.Lookup[key] +} + +// GetEdge gets the edge in the graph from src to dest +func (g FlyteGraph) GetEdge(src, dest string) *graphviz.Edge { + return g.Edges.SrcToDsts[src][dest][0] +} + +// DoesEdgeExist checks if an edge exists in the graph from src to dest +func (g FlyteGraph) DoesEdgeExist(src, dest string) bool { + return g.Edges.SrcToDsts[src][dest] != nil +} diff --git a/flytectl/pkg/visualize/mocks/graphvizer.go b/flytectl/pkg/visualize/mocks/graphvizer.go new file mode 100644 index 0000000000..1b1f8b6e24 --- /dev/null +++ b/flytectl/pkg/visualize/mocks/graphvizer.go @@ -0,0 +1,273 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + gographviz "github.com/awalterschulze/gographviz" + mock "github.com/stretchr/testify/mock" +) + +// Graphvizer is an autogenerated mock type for the Graphvizer type +type Graphvizer struct { + mock.Mock +} + +type Graphvizer_AddAttr struct { + *mock.Call +} + +func (_m Graphvizer_AddAttr) Return(_a0 error) *Graphvizer_AddAttr { + return &Graphvizer_AddAttr{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnAddAttr(parentGraph string, field string, value string) *Graphvizer_AddAttr { + c_call := _m.On("AddAttr", parentGraph, field, value) + return &Graphvizer_AddAttr{Call: c_call} +} + +func (_m *Graphvizer) OnAddAttrMatch(matchers ...interface{}) *Graphvizer_AddAttr { + c_call := _m.On("AddAttr", matchers...) + return &Graphvizer_AddAttr{Call: c_call} +} + +// AddAttr provides a mock function with given fields: parentGraph, field, value +func (_m *Graphvizer) AddAttr(parentGraph string, field string, value string) error { + ret := _m.Called(parentGraph, field, value) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, string) error); ok { + r0 = rf(parentGraph, field, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Graphvizer_AddEdge struct { + *mock.Call +} + +func (_m Graphvizer_AddEdge) Return(_a0 error) *Graphvizer_AddEdge { + return &Graphvizer_AddEdge{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnAddEdge(src string, dst string, directed bool, attrs map[string]string) *Graphvizer_AddEdge { + c_call := _m.On("AddEdge", src, dst, directed, attrs) + return &Graphvizer_AddEdge{Call: c_call} +} + +func (_m *Graphvizer) OnAddEdgeMatch(matchers ...interface{}) *Graphvizer_AddEdge { + c_call := _m.On("AddEdge", matchers...) 
+ return &Graphvizer_AddEdge{Call: c_call} +} + +// AddEdge provides a mock function with given fields: src, dst, directed, attrs +func (_m *Graphvizer) AddEdge(src string, dst string, directed bool, attrs map[string]string) error { + ret := _m.Called(src, dst, directed, attrs) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, bool, map[string]string) error); ok { + r0 = rf(src, dst, directed, attrs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Graphvizer_AddNode struct { + *mock.Call +} + +func (_m Graphvizer_AddNode) Return(_a0 error) *Graphvizer_AddNode { + return &Graphvizer_AddNode{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnAddNode(parentGraph string, name string, attrs map[string]string) *Graphvizer_AddNode { + c_call := _m.On("AddNode", parentGraph, name, attrs) + return &Graphvizer_AddNode{Call: c_call} +} + +func (_m *Graphvizer) OnAddNodeMatch(matchers ...interface{}) *Graphvizer_AddNode { + c_call := _m.On("AddNode", matchers...) + return &Graphvizer_AddNode{Call: c_call} +} + +// AddNode provides a mock function with given fields: parentGraph, name, attrs +func (_m *Graphvizer) AddNode(parentGraph string, name string, attrs map[string]string) error { + ret := _m.Called(parentGraph, name, attrs) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, map[string]string) error); ok { + r0 = rf(parentGraph, name, attrs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Graphvizer_AddSubGraph struct { + *mock.Call +} + +func (_m Graphvizer_AddSubGraph) Return(_a0 error) *Graphvizer_AddSubGraph { + return &Graphvizer_AddSubGraph{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnAddSubGraph(parentGraph string, name string, attrs map[string]string) *Graphvizer_AddSubGraph { + c_call := _m.On("AddSubGraph", parentGraph, name, attrs) + return &Graphvizer_AddSubGraph{Call: c_call} +} + +func (_m *Graphvizer) OnAddSubGraphMatch(matchers ...interface{}) *Graphvizer_AddSubGraph { + c_call := 
_m.On("AddSubGraph", matchers...) + return &Graphvizer_AddSubGraph{Call: c_call} +} + +// AddSubGraph provides a mock function with given fields: parentGraph, name, attrs +func (_m *Graphvizer) AddSubGraph(parentGraph string, name string, attrs map[string]string) error { + ret := _m.Called(parentGraph, name, attrs) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string, map[string]string) error); ok { + r0 = rf(parentGraph, name, attrs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Graphvizer_DoesEdgeExist struct { + *mock.Call +} + +func (_m Graphvizer_DoesEdgeExist) Return(_a0 bool) *Graphvizer_DoesEdgeExist { + return &Graphvizer_DoesEdgeExist{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnDoesEdgeExist(src string, dest string) *Graphvizer_DoesEdgeExist { + c_call := _m.On("DoesEdgeExist", src, dest) + return &Graphvizer_DoesEdgeExist{Call: c_call} +} + +func (_m *Graphvizer) OnDoesEdgeExistMatch(matchers ...interface{}) *Graphvizer_DoesEdgeExist { + c_call := _m.On("DoesEdgeExist", matchers...) + return &Graphvizer_DoesEdgeExist{Call: c_call} +} + +// DoesEdgeExist provides a mock function with given fields: src, dest +func (_m *Graphvizer) DoesEdgeExist(src string, dest string) bool { + ret := _m.Called(src, dest) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, string) bool); ok { + r0 = rf(src, dest) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +type Graphvizer_GetEdge struct { + *mock.Call +} + +func (_m Graphvizer_GetEdge) Return(_a0 *gographviz.Edge) *Graphvizer_GetEdge { + return &Graphvizer_GetEdge{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnGetEdge(src string, dest string) *Graphvizer_GetEdge { + c_call := _m.On("GetEdge", src, dest) + return &Graphvizer_GetEdge{Call: c_call} +} + +func (_m *Graphvizer) OnGetEdgeMatch(matchers ...interface{}) *Graphvizer_GetEdge { + c_call := _m.On("GetEdge", matchers...) 
+ return &Graphvizer_GetEdge{Call: c_call} +} + +// GetEdge provides a mock function with given fields: src, dest +func (_m *Graphvizer) GetEdge(src string, dest string) *gographviz.Edge { + ret := _m.Called(src, dest) + + var r0 *gographviz.Edge + if rf, ok := ret.Get(0).(func(string, string) *gographviz.Edge); ok { + r0 = rf(src, dest) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gographviz.Edge) + } + } + + return r0 +} + +type Graphvizer_GetNode struct { + *mock.Call +} + +func (_m Graphvizer_GetNode) Return(_a0 *gographviz.Node) *Graphvizer_GetNode { + return &Graphvizer_GetNode{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnGetNode(key string) *Graphvizer_GetNode { + c_call := _m.On("GetNode", key) + return &Graphvizer_GetNode{Call: c_call} +} + +func (_m *Graphvizer) OnGetNodeMatch(matchers ...interface{}) *Graphvizer_GetNode { + c_call := _m.On("GetNode", matchers...) + return &Graphvizer_GetNode{Call: c_call} +} + +// GetNode provides a mock function with given fields: key +func (_m *Graphvizer) GetNode(key string) *gographviz.Node { + ret := _m.Called(key) + + var r0 *gographviz.Node + if rf, ok := ret.Get(0).(func(string) *gographviz.Node); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*gographviz.Node) + } + } + + return r0 +} + +type Graphvizer_SetName struct { + *mock.Call +} + +func (_m Graphvizer_SetName) Return(_a0 error) *Graphvizer_SetName { + return &Graphvizer_SetName{Call: _m.Call.Return(_a0)} +} + +func (_m *Graphvizer) OnSetName(name string) *Graphvizer_SetName { + c_call := _m.On("SetName", name) + return &Graphvizer_SetName{Call: c_call} +} + +func (_m *Graphvizer) OnSetNameMatch(matchers ...interface{}) *Graphvizer_SetName { + c_call := _m.On("SetName", matchers...) 
+ return &Graphvizer_SetName{Call: c_call} +} + +// SetName provides a mock function with given fields: name +func (_m *Graphvizer) SetName(name string) error { + ret := _m.Called(name) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(name) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flytectl/pkg/visualize/testdata/compiled_closure_branch_nested.json b/flytectl/pkg/visualize/testdata/compiled_closure_branch_nested.json new file mode 100644 index 0000000000..baae3d9926 --- /dev/null +++ b/flytectl/pkg/visualize/testdata/compiled_closure_branch_nested.json @@ -0,0 +1,553 @@ +{ + "primary": { + "template": { + "id": { + "resourceType": "WORKFLOW", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.so_nested", + "version": "v1" + }, + "metadata": {}, + "interface": { + "inputs": { + "variables": { + "my_input": { + "type": { + "simple": "FLOAT" + }, + "description": "my_input" + } + } + }, + "outputs": { + "variables": { + "o0": { + "type": { + "simple": "FLOAT" + }, + "description": "o0" + } + } + } + }, + "nodes": [ + { + "id": "start-node" + }, + { + "id": "end-node", + "inputs": [ + { + "var": "o0", + "binding": { + "promise": { + "nodeId": "n0", + "var": "o0" + } + } + } + ] + }, + { + "id": "n0", + "metadata": { + "name": "fractions", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": ".my_input", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "my_input" + } + } + } + ], + "branchNode": { + "ifElse": { + "case": { + "condition": { + "conjunction": { + "leftExpression": { + "comparison": { + "operator": "GT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 0.1 + } + } + } + }, + "rightExpression": { + "comparison": { + "operator": "LT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 1 + } + } + } + } + } + }, + "thenNode": { + "id": 
"n0-n0", + "metadata": { + "name": "inner_fractions", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": ".my_input", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "my_input" + } + } + } + ], + "branchNode": { + "ifElse": { + "case": { + "condition": { + "comparison": { + "operator": "LT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 0.5 + } + } + } + }, + "thenNode": { + "id": "n0-n0-n0-n0", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.run_conditions.double", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "n", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "my_input" + } + } + } + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.double", + "version": "v1" + } + } + } + }, + "other": [ + { + "condition": { + "conjunction": { + "leftExpression": { + "comparison": { + "operator": "GT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 0.5 + } + } + } + }, + "rightExpression": { + "comparison": { + "operator": "LT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 0.7 + } + } + } + } + } + }, + "thenNode": { + "id": "n0-n0-n0-n1", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.run_conditions.square", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "n", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "my_input" + } + } + } + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.square", + "version": "v1" + } + } + } + } + ], + "error": { + "failedNodeId": "inner_fractions", + "message": "Only \u003c0.7 allowed" + } + } + 
} + } + }, + "other": [ + { + "condition": { + "conjunction": { + "leftExpression": { + "comparison": { + "operator": "GT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 1 + } + } + } + }, + "rightExpression": { + "comparison": { + "operator": "LT", + "leftValue": { + "var": ".my_input" + }, + "rightValue": { + "primitive": { + "floatValue": 10 + } + } + } + } + } + }, + "thenNode": { + "id": "n0-n1", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.run_conditions.square", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "n", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "my_input" + } + } + } + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.square", + "version": "v1" + } + } + } + } + ], + "elseNode": { + "id": "n0-n2", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.run_conditions.double", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "n", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "my_input" + } + } + } + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.double", + "version": "v1" + } + } + } + } + } + } + ], + "outputs": [ + { + "var": "o0", + "binding": { + "promise": { + "nodeId": "n0", + "var": "o0" + } + } + } + ], + "metadataDefaults": {} + }, + "connections": { + "downstream": { + "n0": { + "ids": [ + "end-node" + ] + }, + "start-node": { + "ids": [ + "n0" + ] + } + }, + "upstream": { + "end-node": { + "ids": [ + "n0" + ] + }, + "n0": { + "ids": [ + "start-node" + ] + }, + "n0-n0": { + "ids": [ + "start-node" + ] + }, + "n0-n1": { + "ids": [ + "start-node" + ] + }, + "n0-n2": { + "ids": [ + "start-node" + ] + }, + "n0-n0-n0-n0": 
{ + "ids": [ + "start-node" + ] + }, + "n0-n0-n0-n1": { + "ids": [ + "start-node" + ] + }, + "n1": { + "ids": [ + "start-node" + ] + }, + "n2": { + "ids": [ + "start-node" + ] + } + } + } + }, + "tasks": [ + { + "template": { + "id": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.double", + "version": "v1" + }, + "type": "python-task", + "metadata": { + "runtime": { + "type": "FLYTE_SDK", + "version": "0.0.0+develop", + "flavor": "python" + }, + "retries": {}, + "interruptible": false + }, + "interface": { + "inputs": { + "variables": { + "n": { + "type": { + "simple": "FLOAT" + }, + "description": "n" + } + } + }, + "outputs": { + "variables": { + "o0": { + "type": { + "simple": "FLOAT" + }, + "description": "o0" + } + } + } + }, + "container": { + "image": "flytecookbook:core-d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59", + "args": [ + "pyflyte-execute", + "--inputs", + "{{.input}}", + "--output-prefix", + "{{.outputPrefix}}", + "--raw-output-data-prefix", + "{{.rawOutputDataPrefix}}", + "--resolver", + "flytekit.core.python_auto_container.default_task_resolver", + "--", + "task-module", + "core.control_flow.run_conditions", + "task-name", + "double" + ], + "resources": {}, + "env": [ + { + "key": "FLYTE_INTERNAL_CONFIGURATION_PATH", + "value": "/root/sandbox.config" + }, + { + "key": "FLYTE_INTERNAL_IMAGE", + "value": "flytecookbook:core-d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + ] + } + } + }, + { + "template": { + "id": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.run_conditions.square", + "version": "v1" + }, + "type": "python-task", + "metadata": { + "runtime": { + "type": "FLYTE_SDK", + "version": "0.0.0+develop", + "flavor": "python" + }, + "retries": {}, + "interruptible": false + }, + "interface": { + "inputs": { + "variables": { + "n": { + "type": { + "simple": "FLOAT" + }, + "description": "n" + } + } + }, 
+ "outputs": { + "variables": { + "o0": { + "type": { + "simple": "FLOAT" + }, + "description": "o0" + } + } + } + }, + "container": { + "image": "flytecookbook:core-d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59", + "args": [ + "pyflyte-execute", + "--inputs", + "{{.input}}", + "--output-prefix", + "{{.outputPrefix}}", + "--raw-output-data-prefix", + "{{.rawOutputDataPrefix}}", + "--resolver", + "flytekit.core.python_auto_container.default_task_resolver", + "--", + "task-module", + "core.control_flow.run_conditions", + "task-name", + "square" + ], + "resources": {}, + "env": [ + { + "key": "FLYTE_INTERNAL_CONFIGURATION_PATH", + "value": "/root/sandbox.config" + }, + { + "key": "FLYTE_INTERNAL_IMAGE", + "value": "flytecookbook:core-d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + ] + } + } + } + ] +} \ No newline at end of file diff --git a/flytectl/pkg/visualize/testdata/compiled_subworkflows.json b/flytectl/pkg/visualize/testdata/compiled_subworkflows.json new file mode 100644 index 0000000000..8bbf441367 --- /dev/null +++ b/flytectl/pkg/visualize/testdata/compiled_subworkflows.json @@ -0,0 +1,482 @@ +{ + "primary": { + "template": { + "id": { + "resourceType": "WORKFLOW", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.parent_wf", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + }, + "metadata": {}, + "interface": { + "inputs": { + "variables": { + "a": { + "type": { + "simple": "INTEGER" + }, + "description": "a" + } + } + }, + "outputs": { + "variables": { + "o0": { + "type": { + "simple": "INTEGER" + }, + "description": "o0" + }, + "o1": { + "type": { + "simple": "STRING" + }, + "description": "o1" + }, + "o2": { + "type": { + "simple": "STRING" + }, + "description": "o2" + } + } + } + }, + "nodes": [ + { + "id": "start-node" + }, + { + "id": "end-node", + "inputs": [ + { + "var": "o0", + "binding": { + "promise": { + "nodeId": "node-t1-parent", + "var": "t1_int_output" + } + } + }, + { + "var": "o1", + 
"binding": { + "promise": { + "nodeId": "n1", + "var": "o0" + } + } + }, + { + "var": "o2", + "binding": { + "promise": { + "nodeId": "n1", + "var": "o1" + } + } + } + ] + }, + { + "id": "node-t1-parent", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.subworkflows.t1", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "a", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "a" + } + } + } + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.t1", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + } + }, + { + "id": "n1", + "metadata": { + "name": "flytekit.core.workflow.core.control_flow.subworkflows.my_subwf", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "a", + "binding": { + "promise": { + "nodeId": "node-t1-parent", + "var": "t1_int_output" + } + } + } + ], + "upstreamNodeIds": [ + "node-t1-parent" + ], + "workflowNode": { + "subWorkflowRef": { + "resourceType": "WORKFLOW", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.my_subwf", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + } + } + ], + "outputs": [ + { + "var": "o0", + "binding": { + "promise": { + "nodeId": "node-t1-parent", + "var": "t1_int_output" + } + } + }, + { + "var": "o1", + "binding": { + "promise": { + "nodeId": "n1", + "var": "o0" + } + } + }, + { + "var": "o2", + "binding": { + "promise": { + "nodeId": "n1", + "var": "o1" + } + } + } + ], + "metadataDefaults": {} + }, + "connections": { + "downstream": { + "n1": { + "ids": [ + "end-node" + ] + }, + "node-t1-parent": { + "ids": [ + "end-node", + "n1" + ] + }, + "start-node": { + "ids": [ + "node-t1-parent" + ] + } + }, + "upstream": { + "end-node": { + "ids": [ + "n1", + "node-t1-parent" + ] + }, + "n1": { + "ids": [ + "node-t1-parent" + ] + }, + "node-t1-parent": { 
+ "ids": [ + "start-node" + ] + } + } + } + }, + "subWorkflows": [ + { + "template": { + "id": { + "resourceType": "WORKFLOW", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.my_subwf", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + }, + "metadata": {}, + "interface": { + "inputs": { + "variables": { + "a": { + "type": { + "simple": "INTEGER" + }, + "description": "a" + } + } + }, + "outputs": { + "variables": { + "o0": { + "type": { + "simple": "STRING" + }, + "description": "o0" + }, + "o1": { + "type": { + "simple": "STRING" + }, + "description": "o1" + } + } + } + }, + "nodes": [ + { + "id": "start-node" + }, + { + "id": "end-node", + "inputs": [ + { + "var": "o0", + "binding": { + "promise": { + "nodeId": "n0", + "var": "c" + } + } + }, + { + "var": "o1", + "binding": { + "promise": { + "nodeId": "n1", + "var": "c" + } + } + } + ] + }, + { + "id": "n0", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.subworkflows.t1", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "a", + "binding": { + "promise": { + "nodeId": "start-node", + "var": "a" + } + } + } + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.t1", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + } + }, + { + "id": "n1", + "metadata": { + "name": "flytekit.core.python_function_task.core.control_flow.subworkflows.t1", + "retries": {}, + "interruptible": false + }, + "inputs": [ + { + "var": "a", + "binding": { + "promise": { + "nodeId": "n0", + "var": "t1_int_output" + } + } + } + ], + "upstreamNodeIds": [ + "n0" + ], + "taskNode": { + "referenceId": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.t1", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + } + } + ], + "outputs": [ + 
{ + "var": "o0", + "binding": { + "promise": { + "nodeId": "n0", + "var": "c" + } + } + }, + { + "var": "o1", + "binding": { + "promise": { + "nodeId": "n1", + "var": "c" + } + } + } + ], + "metadataDefaults": {} + }, + "connections": { + "downstream": { + "n0": { + "ids": [ + "end-node", + "n1" + ] + }, + "n1": { + "ids": [ + "end-node" + ] + }, + "start-node": { + "ids": [ + "n0" + ] + } + }, + "upstream": { + "end-node": { + "ids": [ + "n0", + "n1" + ] + }, + "n0": { + "ids": [ + "start-node" + ] + }, + "n1": { + "ids": [ + "n0" + ] + } + } + } + } + ], + "tasks": [ + { + "template": { + "id": { + "resourceType": "TASK", + "project": "flytesnacks", + "domain": "development", + "name": "core.control_flow.subworkflows.t1", + "version": "d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + }, + "type": "python-task", + "metadata": { + "runtime": { + "type": "FLYTE_SDK", + "version": "0.0.0+develop", + "flavor": "python" + }, + "retries": {}, + "interruptible": false + }, + "interface": { + "inputs": { + "variables": { + "a": { + "type": { + "simple": "INTEGER" + }, + "description": "a" + } + } + }, + "outputs": { + "variables": { + "c": { + "type": { + "simple": "STRING" + }, + "description": "c" + }, + "t1_int_output": { + "type": { + "simple": "INTEGER" + }, + "description": "t1_int_output" + } + } + } + }, + "container": { + "image": "flytecookbook:core-d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59", + "args": [ + "pyflyte-execute", + "--inputs", + "{{.input}}", + "--output-prefix", + "{{.outputPrefix}}", + "--raw-output-data-prefix", + "{{.rawOutputDataPrefix}}", + "--resolver", + "flytekit.core.python_auto_container.default_task_resolver", + "--", + "task-module", + "core.control_flow.subworkflows", + "task-name", + "t1" + ], + "resources": {}, + "env": [ + { + "key": "FLYTE_INTERNAL_CONFIGURATION_PATH", + "value": "/root/sandbox.config" + }, + { + "key": "FLYTE_INTERNAL_IMAGE", + "value": "flytecookbook:core-d5fa3ecfaca02f9b83957c68fd5fe3c9082ccc59" + } + ] + } + } + } + ] 
+}
+
diff --git a/flytectl/proposal/README.md b/flytectl/proposal/README.md
new file mode 100644
index 0000000000..833f01d0bb
--- /dev/null
+++ b/flytectl/proposal/README.md
@@ -0,0 +1,250 @@
+# Introduction
+
+This document proposes **Flytectl** as a single CLI that interacts with the FlyteAdmin service. It is proposed to write the CLI in **Golang** and would support both gRPC and REST endpoints of the FlyteAdmin. We will start with the gRPC endpoint since the client can be easily generated. In the future, we will work on generating a Swagger-based REST client from the gRPC specification. As we build more SDKs in different languages we will support a common way of interacting with the API. This doesn't mean that some SDKs may provide native ways of interacting with the Admin API (e.g. Flytekit), but the intention is to eventually replace **Flytekit/Flytecli** with Flytectl exclusively.
+
+Flytectl has been designed to deliver user features without having to rely on the UI. Flytectl will follow the standard oauth2 for authentication, supported by FlyteAdmin. Moreover, Flytectl should be readily available on almost any platform - OSX, Linux and Windows. We will strive to keep it relatively lean and fast.
+
+# Why One CLI?
+
+As we build multiple SDKs, we need a native way of interacting with the API. Having multiple CLIs makes it hard to keep all of them in sync as we rapidly evolve the API and add more features.
+
+
+
+
+# Why Golang?
+
+- Most of Flyte backend is written in Golang.
+- Golang offers great CLI tooling support with viper and cobra.
+- Golang toolchain helps create cross-compiled small, lightweight binary, which is efficient and easy to use.
+- We already generate Golang proto and clients for all our IDL.
+- We have multiple common libraries available to ease the development of this tool.
+- Kubectl is a stellar example of a CLI done well.
+
+## Generating Swagger code
+
+We started exploring this — [Flytetools](https://github.com/lyft/flytetools#tools) has some work. The Swagger code-gen maintainer also approached us to see if they could help.
+
+# API
+
+## Top level commands
+
+```bash
+$ flytectl [options]
+ version
+ configure
+ get
+ create
+ update
+ delete
+```
+
+### base options
+
+- *endpoint* endpoint where Flyteadmin is available
+- *insecure* use if Oauth is not available
+- optional *project* project for which we need to retrieve details
+- optional *domain* domain for which we need to retrieve details
+- TBD
+
+### version
+
+returns the version of the CLI, version of Admin service, and version of the Platform that is deployed.
+
+### configure
+
+Allows configuring Flytectl for your own usage (low pri). Needed especially for storing Auth tokens.
+
+### get/delete
+
+Get retrieves a list of resources that is qualified by a further sub-command. For example
+```bash
+$ flytectl --endpoint "example.flyte.net" get projects
+$ flytectl --endpoint "example.flyte.net" --project "p" --domain "d" delete workflows
+```
+This returns a list of projects
+
+To retrieve just one project
+```bash
+$ flytectl --endpoint "example.flyte.net" get projects "project-x"
+$ flytectl --endpoint "example.flyte.net" --project "p" --domain "d" delete workflows "W1"
+```
+
+### Create is special
+
+Create may need more information than can be easily passed in the command line. We recommend using files to create an entity. The file could be in protobuf, jsonpb (json) or jsonpb (yaml) form.
+ Eventually, we may want to simplify the json and yaml representations, but that is not required in the first pass. We may also want to create just a separate option for that.
+
+The create for Task and Workflow is essential to what is encompassed in the pyflyte as the registration process.
We will decouple the registration process such that the pyflyte, jflyte (other native cli's or +code methods) can dump a serialized representations of the workflows and tasks that are directly consumed by **flytectl**. Thus flytectl is essential in every flow for the user. +#### Create Templatization + +User-facing SDKs can serialize workflow code to protobuf representations, but these will be incomplete. Specifically, the _project_, _domain_, and _version_ parameters must be supplied at create time since these are attributes of the registerable rather than serialized object. Placeholder template variables include: + +* `{{ .project }}` +* `{{ .domain }}` +* `{{ .version }}` +* [auth](https://github.com/flyteorg/flyteidl/blob/c3baba8983019680ef57b6244cea36ba951233ed/protos/flyteidl/admin/common.proto#L241): including the assumable_iam_role and/or kubernetes_service_account +* the [output_location_prefix](https://github.com/flyteorg/flyteidl/blob/c3baba8983019680ef57b6244cea36ba951233ed/protos/flyteidl/admin/common.proto#L250) + +will be included in the serialized protobuf that must be substituted at **create** time. Eventually, the hope is that substitution will be done server-side. + +Furthermore, to reproduce the equivalent **fast-register** code path for the flyte-cli defined in flytekit, an equivalent _fast-create_ command must fill in additional template variables in the [task container args](https://github.com/flyteorg/flyteidl/blob/master/protos/flyteidl/core/tasks.proto#L142). These serialized, templatized args will appear like so: + +``` +"pyflyte-fast-execute", +"--additional-distribution", +"{{ .remote_package_path }}", +"--dest-dir", +"{{ .dest_dir }}", +"--", +"pyflyte-execute", +... +``` + +The `remote package path` is determined by uploading the compressed user code (produced in the serialize step) to a user-specified remote directory (called `additional-distribution-dir` in flytekit). 
In the case of fast-create the code _version_ arg can be deterministically assigned when serializing the code. Compressed code archives uploaded as individual files to the remote directory can assume the version name to guarantee uniqueness. + +The `dest dir` is an optional argument specified by the user to designate where code is downloaded at execution time. + +![Registration process](https://raw.githubusercontent.com/flyteorg/static-resources/main/flytectl/readme/flytectl_interaction.png) + +### update + +This is a lower priority option as most entities in flyte are immutable and do not support updates. For the ones where updates are supported, we should look into retrieving the existing and allowing editing in an editor, as kubectl edit does. + + + + +# Details of each resource +## Projects + +Projects are top level entity in Flyte. You can fetch multiple projects or one project using the CLI. Think about projects like namespaces. + + - create +```bash +$ flytectl create projects --name "Human readable Name of project" --id project-id --labels key=value --labels key=value --description "long string" +Alternatively +$ flytectl create project -f project.yaml +``` + +```yaml +project.yaml +name: Human readable project name +id: project-x +labels: + - k: v + - k1: v1 +description: | + Long description +``` + - get +```bash +$ flytectl get projects [project-name] [-o yaml | -o json | default -o table] +``` + - update +```bash +$ flytectl update projects --id project-x ... +# You can only update one project at a time +``` + +## Tasks + +- get +```bash +$ flytectl get tasks [task-name] [-o yaml | -o json | default -o table] [--filters...] [--sort-by...] [--selectors...] +``` + - get a specific version and get a template to launch + Create an execution is complicated as the user needs to know all the input types, and a way to simplify this could be to create a YAML template locally from the launchplan (the interface, etc.) 
+```bash
+$ flytectl get task task-name --execution-template -o YAML
+yaml.template (TBD)
+This is a special version of the get task which can be executed by passing it to create execution.
+
+```
+ - create
+ - create
+ - update
+
+## Workflows
+
+Support
+ - get
+```bash
+$ flytectl get workflows [workflow-name] [-o yaml | -o json | default -o table] [--filters...] [--sort-by...] [--selectors...]
+```
+ - create
+ - update
+
+## Launch Plans
+
+Support
+ - get
+```bash
+$ flytectl get launch-plans [launchplan-name] [-o yaml | -o json | default -o table] [--filters...] [--sort-by...] [--selectors...]
+```
+ - get a specific version and get a template to launch
+ Creating an execution is complicated as the user needs to know all the input types, and a way to simplify this could be to create a YAML template locally from the launchplan (the interface, etc.)
+```bash
+$ flytectl get launch-plans launch-plan-name --execution-template -o YAML
+yaml.template (TBD)
+This is a special version of the get launch-plan which can be executed by passing it to create execution.
+
+```
+ - create
+ - update
+
+## Execution
+
+Create or retrieve an execution.
+ - get
+Get all executions or get a single execution.
+```bash
+$ flytectl get execution [exec-name] [-o yaml | -o json | default -o table] [--filters...] [--sort-by...] [--selectors...]
+```
+An interesting feature in get-execution might be to filter the execution of a node within the execution only or quickly find the ones that have failed.
+Visualizing the execution is also challenging. We may want to visualize the execution graph.
+We could use https://graphviz.org/ to visualize the DAG.
+Within the DAG, NodeExecutions and corresponding task executions need to be fetched.
+ - create
+ Create an execution for a LaunchPlan or a Task. This is very interesting as it should accept inputs for the execution.
+```bash +$ flytectl create execution -f template.yaml (see get-template command) +OR +$ flytectl create execution --launch-plan "name" --inputs "key=value" +``` + - delete - here refers to terminate + +## MatchableEntity + +Ability to retrieve the matchable entity and edit its details + - get + - create + - update + +## Outputs + +Support + - get + - create + - update + +# No resource interactions + +## Install all examples + +Today Flytesnacks houses a few examples for Flyte usage in python. When a user wants to get started with Flyte quickly, it would be preferable that all Flytesnacks examples are serialized and stored as artifacts in flytesnacks for every checkin. This can be done for python flytekit using `pyflyte serialize` command. Once they are posted as serialized blobs, flytectl could easily retrieve them and register them in a specific project as desired by the user. + +```bash +$ flytectl examples register-all [cookbook|plugins|--custom-path=remote-path] [--semver semantic-version-of-flytesnacks-examples] --target-project --target-domain +``` +The remote has to follow a protocol. It should be an archive - `tar.gz` with two folders `example-set/ -tasks/*.pb -workflows/*.pb` All the workflows in this path will be installed to the target project/domain. + +## Setup a repository with dockerfile for writing code for Flyte + +Maybe we should look at `boilr` or some other existing framework to do this +```bash +$ flytectl init project --archetype tensorflow-2.0 +$ flytectl init project --archetype spark-3.0 +$ flytectl init project --archetype xgboost +... +``` +All these archetypes should be available in a separate repository for this to work. An archetype is a template with dockerfile and folder setup with flytekit.config. 
diff --git a/flytectl/proposal/flytectl_interaction.png b/flytectl/proposal/flytectl_interaction.png new file mode 100644 index 0000000000..b08e587195 Binary files /dev/null and b/flytectl/proposal/flytectl_interaction.png differ diff --git a/flytectl/pull_request_template.md b/flytectl/pull_request_template.md new file mode 100644 index 0000000000..9cdab99b46 --- /dev/null +++ b/flytectl/pull_request_template.md @@ -0,0 +1,35 @@ +## _Read then delete this section_ + +_- Make sure to use a concise title for the pull-request._ + +_- Use #patch, #minor or #major in the pull-request title to bump the corresponding version. Otherwise, the patch version +will be bumped. [More details](https://github.com/marketplace/actions/github-tag-bump)_ + +# TL;DR +_Please replace this text with a description of what this PR accomplishes._ + +## Type + - [ ] Bug Fix + - [ ] Feature + - [ ] Plugin + +## Are all requirements met? + + - [ ] Code completed + - [ ] Smoke tested + - [ ] Unit tests added + - [ ] Code documentation added + - [ ] Any pending items have an associated Issue + +## Complete description + _How did you fix the bug, make the feature etc. Link to any design docs etc_ + +## Tracking Issue +_Remove the '*fixes*' keyword if there will be multiple PRs to fix the linked issue_ + +fixes https://github.com/flyteorg/flyte/issues/ + +## Follow-up issue +_NA_ +OR +_https://github.com/flyteorg/flyte/issues/_