From 6d58c73214285f6beacc255eefe27699781d909e Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Sun, 7 Apr 2024 04:52:53 -0700 Subject: [PATCH 01/15] Change phase to WaitingForResources when quota exceeded (#5195) --- .../tasks/pluginmachinery/internal/webapi/allocation_token.go | 4 ++-- .../controller/nodes/task/resourcemanager/config/config.go | 2 +- .../nodes/task/resourcemanager/config/config_flags.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go index a20ef4a1ab..4beaaad91f 100644 --- a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go @@ -64,8 +64,8 @@ func (a tokenAllocator) allocateToken(ctx context.Context, p webapi.AsyncPlugin, return &State{ AllocationTokenRequestStartTime: startTime, Phase: PhaseNotStarted, - }, core.PhaseInfoQueued( - a.clock.Now(), 0, "Quota for task has exceeded. The request is enqueued."), nil + }, core.PhaseInfoWaitingForResourcesInfo( + a.clock.Now(), 0, "Quota for task has exceeded. Waiting for the resource.", nil), nil } return nil, core.PhaseInfo{}, fmt.Errorf("allocation status undefined [%v]", allocationStatus) diff --git a/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config.go b/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config.go index 5a92d3c8e8..8e8acd54b7 100644 --- a/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config.go +++ b/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config.go @@ -27,7 +27,7 @@ var ( // Configs for Resource Manager type Config struct { - Type Type `json:"type" pflag:"noop,Which resource manager to use"` + Type Type `json:"type" pflag:"noop, Which resource manager to use, redis or noop. Default is noop."` ResourceMaxQuota int `json:"resourceMaxQuota" pflag:",Global limit for concurrent Qubole queries"` RedisConfig RedisConfig `json:"redis" pflag:",Config for Redis resourcemanager."` } diff --git a/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config_flags.go b/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config_flags.go index 66bcc0eb13..bc76bb47c5 100755 --- a/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config_flags.go +++ b/flytepropeller/pkg/controller/nodes/task/resourcemanager/config/config_flags.go @@ -50,7 +50,7 @@ func (Config) mustMarshalJSON(v json.Marshaler) string { // flags is json-name.json-sub-name... etc. func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "type"), defaultConfig.Type, "Which resource manager to use") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "type"), defaultConfig.Type, " Which resource manager to use, redis or noop. Default is noop.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "resourceMaxQuota"), defaultConfig.ResourceMaxQuota, "Global limit for concurrent Qubole queries") cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "redis.hostPaths"), defaultConfig.RedisConfig.HostPaths, "Redis hosts locations.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "redis.primaryName"), defaultConfig.RedisConfig.PrimaryName, "Redis primary name, fill in only if you are connecting to a redis sentinel cluster.") From 8380f842b84d5b99f4a503edfca5a65b26ed591a Mon Sep 17 00:00:00 2001 From: "Fabio M. 
Graetz, Ph.D" Date: Mon, 8 Apr 2024 09:23:06 +0200 Subject: [PATCH 02/15] Fix: Update spark operator helm repository (#5198) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabio M. Graetz, Ph.D. Signed-off-by: Fabio Grätz --- docs/deployment/plugins/k8s/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deployment/plugins/k8s/index.rst b/docs/deployment/plugins/k8s/index.rst index 908c986d9b..9cc30b79d5 100644 --- a/docs/deployment/plugins/k8s/index.rst +++ b/docs/deployment/plugins/k8s/index.rst @@ -83,7 +83,7 @@ Select the integration you need and follow the steps to install the correspondin .. code-block:: bash - helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator + helm repo add spark-operator https://kubeflow.github.io/spark-operator To install the Spark operator, run the following command: From 65685827276de4497663f732af4173d2bc698c91 Mon Sep 17 00:00:00 2001 From: Chi-Sheng Liu Date: Mon, 8 Apr 2024 22:14:41 +0800 Subject: [PATCH 03/15] docs(troubleshoot): Add docker error troubleshooting guide (#4972) Signed-off-by: Chi-Sheng Liu Co-authored-by: Kevin Su --- docs/community/troubleshoot.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/community/troubleshoot.rst b/docs/community/troubleshoot.rst index 41bc6624c3..692e3c2aed 100644 --- a/docs/community/troubleshoot.rst +++ b/docs/community/troubleshoot.rst @@ -32,6 +32,31 @@ Depending on the contents of the logs or the `Events`, you can try different thi Debugging common execution errors ---------------------------------- +``Error: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This error will show if you are not running Docker with the native Docker engine in a Linux machine. Most probably you are running Docker via Docker Desktop. + +- If you are using Docker Desktop in MacOs, run: + +.. prompt:: bash $ + + sudo ln -s ~/Library/Containers/com.docker.docker/Data/docker.raw.sock /var/run/docker.sock + +- If you are using Docker Desktop in Linux, run: + +.. prompt:: bash $ + + sudo ln -s ~$USER/.docker/desktop/docker.sock /var/run/docker.sock + +- If you are using another tool to run Docker, you need to make sure that ``/var/run/docker.sock`` is linked to the correct socket file. + + For example, if you are using Rancher Desktop on Linux, run: + + .. prompt:: bash $ + + sudo ln -s ~$USER/.rd/docker.sock /var/run/docker.sock + ``message: '0/1 nodes are available: 1 Insufficient cpu. 
preemption: 0/1 nodes are available: 1 No preemption victims found for incoming pod.'`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 2528de73ce38b94c15e15a24a3b7416d752dedcc Mon Sep 17 00:00:00 2001 From: Paul Dittamo <37558497+pvditt@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:46:50 -0700 Subject: [PATCH 04/15] add cache client read and write otel tracing (#5184) * add cache client read and write otel tracing Signed-off-by: Paul Dittamo * lint Signed-off-by: Paul Dittamo --------- Signed-off-by: Paul Dittamo --- flytepropeller/pkg/controller/nodes/cache.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/flytepropeller/pkg/controller/nodes/cache.go b/flytepropeller/pkg/controller/nodes/cache.go index 5d4c8455a5..59cf21057d 100644 --- a/flytepropeller/pkg/controller/nodes/cache.go +++ b/flytepropeller/pkg/controller/nodes/cache.go @@ -21,6 +21,7 @@ import ( "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/interfaces" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task" "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flyte/flytestdlib/otelutils" "github.com/flyteorg/flyte/flytestdlib/storage" ) @@ -77,6 +78,8 @@ func updatePhaseCacheInfo(phaseInfo handler.PhaseInfo, cacheStatus *catalog.Stat // CheckCatalogCache uses the handler and contexts to check if cached outputs for the current node // exist. If the exist, this function also copies the outputs to this node. func (n *nodeExecutor) CheckCatalogCache(ctx context.Context, nCtx interfaces.NodeExecutionContext, cacheHandler interfaces.CacheableNodeHandler) (catalog.Entry, error) { + ctx, span := otelutils.NewSpan(ctx, otelutils.FlytePropellerTracer, "pkg.controller.nodes.NodeExecutor/CheckCatalogCache") + defer span.End() catalogKey, err := cacheHandler.GetCatalogKey(ctx, nCtx) if err != nil { return catalog.Entry{}, errors.Wrapf(err, "failed to initialize the catalogKey") @@ -197,6 +200,8 @@ func (n *nodeExecutor) ReleaseCatalogReservation(ctx context.Context, nCtx inter // WriteCatalogCache relays the outputs of this node to the cache. This allows future executions // to reuse these data to avoid recomputation. func (n *nodeExecutor) WriteCatalogCache(ctx context.Context, nCtx interfaces.NodeExecutionContext, cacheHandler interfaces.CacheableNodeHandler) (catalog.Status, error) { + ctx, span := otelutils.NewSpan(ctx, otelutils.FlytePropellerTracer, "pkg.controller.nodes.NodeExecutor/WriteCatalogCache") + defer span.End() catalogKey, err := cacheHandler.GetCatalogKey(ctx, nCtx) if err != nil { return catalog.NewStatus(core.CatalogCacheStatus_CACHE_DISABLED, nil), errors.Wrapf(err, "failed to initialize the catalogKey") From 6a39af7786c5a697bf486667f88d458bf5a2bab2 Mon Sep 17 00:00:00 2001 From: Nikki Everett Date: Tue, 9 Apr 2024 14:43:26 -0500 Subject: [PATCH 05/15] fix link (#5199) Signed-off-by: nikki everett --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 4720be51f7..9370cc9703 100644 --- a/docs/index.md +++ b/docs/index.md @@ -106,7 +106,7 @@ Below are the API reference to the different components of Flyte: - Flyte's official Python SDK. * - {doc}`FlyteCTL ` - Flyte's command-line interface for interacting with a Flyte cluster. -* - {doc}`FlyteIDL ` +* - {doc}`FlyteIDL ` - Flyte's core specification language. 
``` From 674367f0c523a8c4432387f9a31727a193df7e6c Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 10 Apr 2024 15:44:52 +0800 Subject: [PATCH 06/15] add SyncTask's timeout setting (#5209) Signed-off-by: Future-Outlier --- .../go/tasks/plugins/webapi/agent/plugin.go | 8 +++++--- .../go/tasks/plugins/webapi/agent/plugin_test.go | 15 +++++++++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go index cc7f15bd80..03c04b4d27 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go @@ -92,12 +92,11 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR taskCategory := admin.TaskCategory{Name: taskTemplate.Type, Version: taskTemplate.TaskTypeVersion} agent, isSync := getFinalAgent(&taskCategory, p.cfg, p.agentRegistry) - finalCtx, cancel := getFinalContext(ctx, "CreateTask", agent) - defer cancel() - taskExecutionMetadata := buildTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()) if isSync { + finalCtx, cancel := getFinalContext(ctx, "ExecuteTaskSync", agent) + defer cancel() client, err := p.getSyncAgentClient(ctx, agent) if err != nil { return nil, nil, err @@ -106,6 +105,9 @@ func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextR return p.ExecuteTaskSync(finalCtx, client, header, inputs) } + finalCtx, cancel := getFinalContext(ctx, "CreateTask", agent) + defer cancel() + // Use async agent client client, err := p.getAsyncAgentClient(ctx, agent) if err != nil { diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go index 9fa36c5c42..3e8cb882c8 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin_test.go @@ -75,15 +75,26 @@ func TestPlugin(t *testing.T) { t.Run("test getFinalTimeout", func(t *testing.T) { timeout := getFinalTimeout("CreateTask", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"CreateTask": {Duration: 1 * time.Millisecond}}}) assert.Equal(t, 1*time.Millisecond, timeout.Duration) + timeout = getFinalTimeout("GetTask", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"GetTask": {Duration: 1 * time.Millisecond}}}) + assert.Equal(t, 1*time.Millisecond, timeout.Duration) timeout = getFinalTimeout("DeleteTask", &Deployment{Endpoint: "localhost:8080", DefaultTimeout: config.Duration{Duration: 10 * time.Second}}) assert.Equal(t, 10*time.Second, timeout.Duration) + timeout = getFinalTimeout("ExecuteTaskSync", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"ExecuteTaskSync": {Duration: 1 * time.Millisecond}}}) + assert.Equal(t, 1*time.Millisecond, timeout.Duration) }) t.Run("test getFinalContext", func(t *testing.T) { - ctx, _ := getFinalContext(context.TODO(), "DeleteTask", &Deployment{}) + + ctx, _ := getFinalContext(context.TODO(), "CreateTask", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"CreateTask": {Duration: 1 * time.Millisecond}}}) + assert.NotEqual(t, context.TODO(), ctx) + + ctx, _ = getFinalContext(context.TODO(), "GetTask", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"GetTask": {Duration: 1 * time.Millisecond}}}) + assert.NotEqual(t, context.TODO(), ctx) + + ctx, _ = getFinalContext(context.TODO(), "DeleteTask", 
&Deployment{}) assert.Equal(t, context.TODO(), ctx) - ctx, _ = getFinalContext(context.TODO(), "CreateTask", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"CreateTask": {Duration: 1 * time.Millisecond}}}) + ctx, _ = getFinalContext(context.TODO(), "ExecuteTaskSync", &Deployment{Endpoint: "localhost:8080", Timeouts: map[string]config.Duration{"ExecuteTaskSync": {Duration: 10 * time.Second}}}) assert.NotEqual(t, context.TODO(), ctx) }) From 1ac8bbe8fab8cbde3f40f98ea307e5a3c75bb4fa Mon Sep 17 00:00:00 2001 From: Future-Outlier Date: Wed, 10 Apr 2024 15:46:07 +0800 Subject: [PATCH 07/15] [easy] [flyteagent] Add `agent-service` endpoint settings for `flyte-core` deployment (#5208) Signed-off-by: Future-Outlier --- charts/flyte/README.md | 8 ++++---- charts/flyte/values.yaml | 2 +- deployment/sandbox/flyte_helm_generated.yaml | 6 +++--- docker/sandbox-bundled/manifests/complete-agent.yaml | 4 ++-- docker/sandbox-bundled/manifests/complete.yaml | 4 ++-- docker/sandbox-bundled/manifests/dev.yaml | 4 ++-- docs/deployment/agents/chatgpt.rst | 12 ++++++++++++ docs/flyte_agents/developing_agents.md | 10 ++++++---- .../testing_agents_in_a_local_development_cluster.md | 9 +++++++-- 9 files changed, 39 insertions(+), 20 deletions(-) diff --git a/charts/flyte/README.md b/charts/flyte/README.md index c1820f5a3a..8dad6db9a5 100644 --- a/charts/flyte/README.md +++ b/charts/flyte/README.md @@ -71,7 +71,7 @@ helm upgrade -f values-sandbox.yaml flyte . | contour.tolerations | list | `[]` | tolerations for Contour deployment | | daskoperator | object | `{"enabled":false}` | Optional: Dask Plugin using the Dask Operator | | daskoperator.enabled | bool | `false` | - enable or disable the dask operator deployment installation | -| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.11.1-b1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"bigquery_query_job_task":"agent-service","container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernet
es-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.11.1-b1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.11.1-b1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.10.3"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.11.1-b1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.11.1-b1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[
]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | +| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.11.1-b1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webho
ok":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.11.1-b1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.11.1-b1"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.10.3"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"
flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.11.1-b1"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.11.1-b1"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | | flyte.cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | flyte.cluster_resource_manager.config.cluster_resources | object | `{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}` | ClusterResource parameters Refer to the [structure](https://pkg.go.dev/github.com/lyft/flyteadmin@v0.3.37/pkg/runtime/interfaces#ClusterResourceConfig) to customize. 
| | flyte.cluster_resource_manager.config.cluster_resources.standaloneDeployment | bool | `false` | Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints | @@ -91,7 +91,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.common.ingress.separateGrpcIngressAnnotations | object | `{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"}` | - Extra Ingress annotations applied only to the GRPC ingress. Only makes sense if `separateGrpcIngress` is enabled. | | flyte.common.ingress.tls | object | `{"enabled":false}` | - TLS Settings | | flyte.common.ingress.webpackHMR | bool | `true` | - Enable or disable HMR route to flyteconsole. This is useful only for frontend development. | -| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.11.1-b1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"bigquery_query_job_task":"agent-service","container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY
":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | +| flyte.configmap | object | `{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.11.1-b1","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"mi
nio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" }}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}}` | ----------------------------------------------------------------- CONFIGMAPS SETTINGS | | flyte.configmap.adminServer | object | `{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}}` | FlyteAdmin server configuration | | flyte.configmap.adminServer.auth | object | `{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}}` | Authentication configuration | | flyte.configmap.adminServer.server.security.secure | bool | `false` | Controls whether to serve requests over SSL/TLS. | @@ -104,8 +104,8 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.configmap.core.propeller | object | `{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"}` | follows the structure specified [here](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/config). 
| | flyte.configmap.datacatalogServer | object | `{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}}` | Datacatalog server config | | flyte.configmap.domain | object | `{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]}` | Domains configuration for Flyte projects. This enables the specified number of domains across all projects in Flyte. | -| flyte.configmap.enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"bigquery_query_job_task":"agent-service","container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | -| flyte.configmap.enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"bigquery_query_job_task":"agent-service","container":"container","container_array":"k8s-array","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | +| flyte.configmap.enabled_plugins.tasks | object | `{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}}` | Tasks specific configuration [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#GetConfig) | +| flyte.configmap.enabled_plugins.tasks.task-plugins | object | `{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service"]}` | Plugins configuration, [structure](https://pkg.go.dev/github.com/flyteorg/flytepropeller/pkg/controller/nodes/task/config#TaskPluginConfig) | | flyte.configmap.enabled_plugins.tasks.task-plugins.enabled-plugins | list | `["container","sidecar","k8s-array","agent-service"]` | [Enabled Plugins](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/config#Config). 
Enable sagemaker*, athena if you install the backend plugins | | flyte.configmap.k8s | object | `{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}}` | Kubernetes specific Flyte configuration | | flyte.configmap.k8s.plugins.k8s | object | `{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}` | Configuration section for all K8s specific plugins [Configuration structure](https://pkg.go.dev/github.com/lyft/flyteplugins/go/tasks/pluginmachinery/flytek8s/config) | diff --git a/charts/flyte/values.yaml b/charts/flyte/values.yaml index 71c9d7a9af..8ca0b0a7fe 100755 --- a/charts/flyte/values.yaml +++ b/charts/flyte/values.yaml @@ -532,7 +532,7 @@ flyte: container: container sidecar: sidecar container_array: k8s-array - bigquery_query_job_task: agent-service + sensor: agent-service # -- Kubernetes specific Flyte configuration diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index e37cea4f37..87270cd8a1 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -634,9 +634,9 @@ data: tasks: task-plugins: default-for-task-types: - bigquery_query_job_task: agent-service container: container container_array: k8s-array + sensor: agent-service sidecar: sidecar enabled-plugins: - container @@ -7173,7 +7173,7 @@ spec: template: metadata: annotations: - configChecksum: "4fd54a75274d84bbb9a90cc421f7aece12c202911984a436a9ec5fe52e942eb" + configChecksum: "673119651fe870e114e1b95cfbc27a6e5c2418215569ab9d0b9451385c32a51" labels: app.kubernetes.io/name: flytepropeller app.kubernetes.io/instance: flyte @@ -7247,7 +7247,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.11.1-b1 annotations: - configChecksum: "4fd54a75274d84bbb9a90cc421f7aece12c202911984a436a9ec5fe52e942eb" + configChecksum: "673119651fe870e114e1b95cfbc27a6e5c2418215569ab9d0b9451385c32a51" spec: securityContext: fsGroup: 65534 diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index ad926e6109..2db8226933 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -816,7 +816,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: SDRTOVJwQzU0WURYTG1NbQ== + haSharedSecret: WEJxZGNCTWJiNFUwZEd1bQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1412,7 +1412,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 1d977a1daf6338c6d55444d6c0565a40353efd71d0a8bef422cfc6387b20a39f + checksum/secret: 45571013e2bcbc17744162363ccc7fc5c99072b2553a3fe29f41660a07e2e864 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 13dc038d3f..98b5aa3657 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -796,7 +796,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: 
MGs1QlJSY2VKM3I0cEQ2bw== + haSharedSecret: a1Zqcm1HUlcxZUN4SExqSw== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1360,7 +1360,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: d2a40d222d6f4b81e6186400d7fc9818c90e07068ccc2569cfdb212ad7782e98 + checksum/secret: bf4abdac7aaf5a74cf2f12f7511d4af46e5c2b8228637c5eb29f9ed5399e4942 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index c258d27b9c..49c259b0d9 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: SVFrS2JhOWVndXFEYlE3WA== + haSharedSecret: dkdTTTV4ZkNpc2pneXZBRQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: b5ff29721af068e75a80eff30c7402def61a64a87c73e8e716d5d06cf05c4bd8 + checksum/secret: 40299f1a8b9fffd1ef10051c289e1d654de7f755fff5f44cde65a9cf96bcd543 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/deployment/agents/chatgpt.rst b/docs/deployment/agents/chatgpt.rst index a810bf5fdf..afc569222f 100644 --- a/docs/deployment/agents/chatgpt.rst +++ b/docs/deployment/agents/chatgpt.rst @@ -38,6 +38,12 @@ Specify agent configuration agent-service: supportedTaskTypes: - chatgpt + # Configuring the timeout is optional. + # Tasks like using ChatGPT with a large model might require a longer time, + # so we have the option to adjust the timeout setting here. + defaultAgent: + timeouts: + ExecuteTaskSync: 10s .. group-tab:: Flyte core @@ -66,6 +72,12 @@ Specify agent configuration agent-service: supportedTaskTypes: - chatgpt + # Configuring the timeout is optional. + # Tasks like using ChatGPT with a large model might require a longer time, + # so we have the option to adjust the timeout setting here. + defaultAgent: + timeouts: + ExecuteTaskSync: 10s Add the OpenAI API token ------------------------------- diff --git a/docs/flyte_agents/developing_agents.md b/docs/flyte_agents/developing_agents.md index 9df49c2f8b..fe55630248 100644 --- a/docs/flyte_agents/developing_agents.md +++ b/docs/flyte_agents/developing_agents.md @@ -184,10 +184,7 @@ kubectl set image deployment/flyteagent flyteagent=ghcr.io/flyteorg/flyteagent:l kubectl rollout restart deployment flytepropeller -n flyte ``` -### 5. - - -### Canary deployment +### 5. Canary deployment Agents can be deployed independently in separate environments. Decoupling agents from the production environment ensures that if any specific agent encounters an error or issue, it will not impact the overall production system. @@ -210,7 +207,12 @@ you can route particular task requests to designated agent services by adjusting endpoint: "dns:///flyteagent.flyte.svc.cluster.local:8000" insecure: true timeouts: + # CreateTask, GetTask and DeleteTask are for async agents. + # ExecuteTaskSync is for sync agents. 
+ CreateTask: 5s GetTask: 5s + DeleteTask: 5s + ExecuteTaskSync: 10s defaultTimeout: 10s agents: custom_agent: diff --git a/docs/flyte_agents/testing_agents_in_a_local_development_cluster.md b/docs/flyte_agents/testing_agents_in_a_local_development_cluster.md index be2c14210d..940385b2fb 100644 --- a/docs/flyte_agents/testing_agents_in_a_local_development_cluster.md +++ b/docs/flyte_agents/testing_agents_in_a_local_development_cluster.md @@ -57,7 +57,7 @@ tasks: - sidecar - K8S-ARRAY default-for-task-types: - - bigquery_query_job_task: agent-service + - sensor: agent-service - container: container - container_array: K8S-ARRAY ``` @@ -69,7 +69,12 @@ plugins: endpoint: "localhost:8000" # your grpc agent server port insecure: true timeouts: - GetTask: 10s + # CreateTask, GetTask and DeleteTask are for async agents. + # ExecuteTaskSync is for sync agents. + CreateTask: 5s + GetTask: 5s + DeleteTask: 5s + ExecuteTaskSync: 10s defaultTimeout: 10s ``` From 8ef5ea9209d1c455a77239d2e32b1c8fcc38cdff Mon Sep 17 00:00:00 2001 From: David Espejo <82604841+davidmirror-ops@users.noreply.github.com> Date: Wed, 10 Apr 2024 12:32:36 -0500 Subject: [PATCH 08/15] Update Monitoring documentation (#5206) * Add reference to prom operator install guide Signed-off-by: davidmirror-ops * Adds info about the three base dashboards Signed-off-by: davidmirror-ops * Adds instructions to enable SMs Signed-off-by: davidmirror-ops * Incorporate reviews Signed-off-by: davidmirror-ops * Minor fixes Signed-off-by: davidmirror-ops * Improve format for steps Signed-off-by: davidmirror-ops --------- Signed-off-by: davidmirror-ops --- docs/deployment/configuration/monitoring.rst | 56 +++++++++++++++++--- 1 file changed, 48 insertions(+), 8 deletions(-) diff --git a/docs/deployment/configuration/monitoring.rst b/docs/deployment/configuration/monitoring.rst index 75bc89adc4..7b0d9ddc0b 100644 --- a/docs/deployment/configuration/monitoring.rst +++ b/docs/deployment/configuration/monitoring.rst @@ -85,15 +85,55 @@ Use Published Dashboards to Monitor Flyte Deployment Flyte Backend is written in Golang and exposes stats using Prometheus. The stats are labeled with workflow, task, project & domain, wherever appropriate. -The dashboards are divided into two types: +Both ``flyteadmin`` and ``flytepropeller`` are instrumented to expose metrics. To visualize these metrics, Flyte provides three Grafana dashboards, each with a different focus: - **User-facing dashboards**: Dashboards that can be used to triage/investigate/observe performance and characteristics of workflows and tasks. - The user-facing dashboard is published under Grafana marketplace ID `13980 `__. + The user-facing dashboard is published under ID `13980 `__ in the Grafana marketplace. -- **System Dashboards**: Dashboards that are useful for the system maintainer to maintain their Flyte deployments. These are further divided into: - - DataPlane/FlytePropeller dashboards published @ `13979 `__ - - ControlPlane/Flyteadmin dashboards published @ `13981 `__ +- **System Dashboards**: Dashboards that are useful for the system maintainer to investigate the status and performance of their Flyte deployments. These are further divided into: + - `DataPlane/FlytePropeller `__: execution engine status and performance. + - `ControlPlane/Flyteadmin `__: API-level monitoring. + +The corresponding JSON files for each dashboard are also located at ``deployment/stats/prometheus``. + +.. note:: + + The dashboards are basic dashboards and do not include all the metrics exposed by Flyte. 
+ Feel free to use the scripts provided `here `__ to improve and -hopefully- contribute the improved dashboards. + +How to use the dashboards +~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. We recommend installing and configuring the Prometheus operator as described in `their docs `__. +This is especially true if you plan to use the Service Monitors provided by the `flyte-core `__ Helm chart. + +2. Enable the Prometheus instance to use Service Monitors in the namespace where Flyte is running, configuring the following keys in the ``prometheus`` resource: + +.. code-block:: yaml + + spec: + serviceMonitorSelector: {} + serviceMonitorNamespaceSelector: {} + +.. note:: + + The above example configuration lets Prometheus use any ``ServiceMonitor`` in any namespace in the cluster. Adjust the configuration to reduce the scope if needed. + +3. Once you have installed and configured the Prometheus operator, enable the Service Monitors in the Helm chart by configuring the following keys in your ``values`` file: + +.. code-block:: yaml + + flyteadmin: + serviceMonitor: + enabled: true + + flytepropeller: + serviceMonitor: + enabled: true + +.. note:: + + By default, the ``ServiceMonitor`` is configured with a ``scrapeTimeout`` of 30s and and ``interval`` of 60s. You can customize these values if needed. + +With the above configuration in place you should be able to import the dashboards in your Grafana instance. -The above mentioned are basic dashboards and do no include all the metrics exposed by Flyte. -Please help us improve the dashboards by contributing to them 🙏. -Refer to the build scripts `here `__. \ No newline at end of file From dd67ff09284f1167bc397a207506ba7ca2f3b947 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Wed, 10 Apr 2024 14:18:43 -0700 Subject: [PATCH 09/15] chore: remove obsolete flyte config files (#5196) * chore: remove obsolete flyte config files * nit Signed-off-by: Kevin Su --------- Signed-off-by: Kevin Su --- flyte.yaml | 106 ------------------------------------ flyte_local.yaml | 110 -------------------------------------- flyte_local_k3d.yaml | 104 ----------------------------------- script/setup_local_dev.sh | 4 +- 4 files changed, 2 insertions(+), 322 deletions(-) delete mode 100644 flyte.yaml delete mode 100644 flyte_local.yaml delete mode 100644 flyte_local_k3d.yaml diff --git a/flyte.yaml b/flyte.yaml deleted file mode 100644 index 9835ce0c9d..0000000000 --- a/flyte.yaml +++ /dev/null @@ -1,106 +0,0 @@ -# This is a sample configuration file. 
-# Real configuration when running inside K8s (local or otherwise) lives in a ConfigMap -# Look in the artifacts directory in the flyte repo for what's actually run -# https://github.com/lyft/flyte/blob/b47565c9998cde32b0b5f995981e3f3c990fa7cd/artifacts/flyteadmin.yaml#L72 -propeller: - rawoutput-prefix: "s3://my-s3-bucket/test/" - kube-config: "/Users/ytong/.flyte/state/kubeconfig" - create-flyteworkflow-crd: true -webhook: - certDir: /tmp/k8s-webhook-server/serving-certs - serviceName: flyte-pod-webhook - localCert: true - servicePort: 30090 - ListenPort: 30090 -tasks: - task-plugins: - enabled-plugins: - - container - - sidecar - - K8S-ARRAY - default-for-task-types: - - container: container - - container_array: K8S-ARRAY -server: - kube-config: "/Users/ytong/.flyte/state/kubeconfig" - httpPort: 30080 - serviceHttpEndpoint: http://localhost:30080/ - grpc: - port: 30081 -flyteadmin: - runScheduler: false -database: - postgres: - port: 30089 - username: postgres - host: localhost - dbname: flyteadmin - options: "sslmode=disable" -storage: - type: minio - connection: - access-key: minio - auth-type: accesskey - secret-key: miniostorage - disable-ssl: true - endpoint: "http://localhost:30084" - region: my-region - cache: - max_size_mbs: 10 - target_gc_percent: 100 - container: "my-s3-bucket" -Logger: - show-source: true - level: 4 -admin: - endpoint: localhost:30081 - insecure: true -plugins: - # All k8s plugins default configuration - k8s: - inject-finalizer: true - default-env-vars: - - AWS_METADATA_SERVICE_TIMEOUT: 5 - - AWS_METADATA_SERVICE_NUM_ATTEMPTS: 20 - - FLYTE_AWS_ENDPOINT: "http://minio.flyte:9000" - - FLYTE_AWS_ACCESS_KEY_ID: minio - - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage - # Logging configuration - logs: - kubernetes-enabled: true - kubernetes-url: "http://localhost:30082" - kubernetes-template-uri: "http://localhost:30082/#/log/{{ .namespace }}/{{ .podName }}/pod?namespace={{ .namespace }}" -cluster_resources: - refreshInterval: 5m - templatePath: "/etc/flyte/clusterresource/templates" - # -- Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints - standaloneDeployment: false - customData: - - production: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - - staging: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - - development: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - refresh: 5m -task_resources: - defaults: - cpu: 500m - memory: 1Gi - limits: - cpu: 2 - memory: 4Gi - gpu: 5 -catalog-cache: - endpoint: localhost:8081 - insecure: true - type: datacatalog diff --git a/flyte_local.yaml b/flyte_local.yaml deleted file mode 100644 index bcb2333440..0000000000 --- a/flyte_local.yaml +++ /dev/null @@ -1,110 +0,0 @@ -# This is a sample configuration file. 
-# Real configuration when running inside K8s (local or otherwise) lives in a ConfigMap -# Look in the artifacts directory in the flyte repo for what's actually run -# https://github.com/lyft/flyte/blob/b47565c9998cde32b0b5f995981e3f3c990fa7cd/artifacts/flyteadmin.yaml#L72 -# Flyte clusters can be run locally with this configuration -# flytectl demo start --dev -# flyte start --config flyte_local.yaml -propeller: - rawoutput-prefix: "s3://my-s3-bucket/test/" - kube-config: "$HOME/.flyte/sandbox/kubeconfig" - create-flyteworkflow-crd: true -webhook: - certDir: /tmp/k8s-webhook-server/serving-certs - serviceName: flyte-pod-webhook - localCert: true - servicePort: 9443 -tasks: - task-plugins: - enabled-plugins: - - container - - sidecar - - K8S-ARRAY - default-for-task-types: - - container: container - - container_array: K8S-ARRAY -server: - kube-config: "$HOME/.flyte/sandbox/kubeconfig" -flyteadmin: - runScheduler: false -database: - postgres: - port: 30001 - username: postgres - password: postgres - host: localhost - dbname: flyteadmin - options: "sslmode=disable" -storage: - type: minio - connection: - access-key: minio - auth-type: accesskey - secret-key: miniostorage - disable-ssl: true - endpoint: "http://localhost:30002" - region: my-region - cache: - max_size_mbs: 10 - target_gc_percent: 100 - container: "my-s3-bucket" -Logger: - show-source: true - level: 5 -admin: - endpoint: localhost:8089 - insecure: true -plugins: - # All k8s plugins default configuration - k8s: - inject-finalizer: true - default-env-vars: - - AWS_METADATA_SERVICE_TIMEOUT: 5 - - AWS_METADATA_SERVICE_NUM_ATTEMPTS: 20 - - FLYTE_AWS_ENDPOINT: "http://flyte-sandbox-minio.flyte:9000" - - FLYTE_AWS_ACCESS_KEY_ID: minio - - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage - # Logging configuration - logs: - kubernetes-enabled: true - kubernetes-template-uri: http://localhost:30080/kubernetes-dashboard/#/log/{{.namespace }}/{{ .podName }}/pod?namespace={{ .namespace }} -cluster_resources: - refreshInterval: 5m - templatePath: "/etc/flyte/clusterresource/templates" - # -- Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints - standaloneDeployment: false - customData: - - production: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - - staging: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - - development: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - refresh: 5m -flyte: - admin: - disableClusterResourceManager: true - disableScheduler: true - propeller: - disableWebhook: true -task_resources: - defaults: - cpu: 500m - memory: 1Gi - limits: - cpu: 2 - memory: 4Gi - gpu: 5 -catalog-cache: - endpoint: localhost:8081 - insecure: true - type: datacatalog diff --git a/flyte_local_k3d.yaml b/flyte_local_k3d.yaml deleted file mode 100644 index df15b58708..0000000000 --- a/flyte_local_k3d.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# This is a sample configuration file. 
-# Real configuration when running inside K8s (local or otherwise) lives in a ConfigMap -# Look in the artifacts directory in the flyte repo for what's actually run -# https://github.com/lyft/flyte/blob/b47565c9998cde32b0b5f995981e3f3c990fa7cd/artifacts/flyteadmin.yaml#L72 -propeller: - rawoutput-prefix: "s3://my-s3-bucket/test/" - kube-config: "$HOME/.k3d/kubeconfig-flyte.yaml" - create-flyteworkflow-crd: true -webhook: - certDir: /tmp/k8s-webhook-server/serving-certs - serviceName: flyte-pod-webhook - localCert: true - servicePort: 9443 -tasks: - task-plugins: - enabled-plugins: - - container - - sidecar - - K8S-ARRAY - default-for-task-types: - - container: container - - container_array: K8S-ARRAY -server: - kube-config: "$HOME/.k3d/kubeconfig-flyte.yaml" -flyteadmin: - runScheduler: false -database: - postgres: - port: 30089 - username: postgres - host: localhost - dbname: flyteadmin - options: "sslmode=disable" -storage: - type: minio - connection: - access-key: minio - auth-type: accesskey - secret-key: miniostorage - disable-ssl: true - endpoint: "http://localhost:30084" - region: my-region - cache: - max_size_mbs: 10 - target_gc_percent: 100 - container: "my-s3-bucket" -Logger: - show-source: true - level: 5 -admin: - endpoint: localhost:8089 - insecure: true -plugins: - # All k8s plugins default configuration - k8s: - inject-finalizer: true - default-env-vars: - - AWS_METADATA_SERVICE_TIMEOUT: 5 - - AWS_METADATA_SERVICE_NUM_ATTEMPTS: 20 - - FLYTE_AWS_ENDPOINT: "http://minio.flyte:9000" - - FLYTE_AWS_ACCESS_KEY_ID: minio - - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage - # Logging configuration - logs: - kubernetes-enabled: true - kubernetes-url: "http://localhost:30082" -cluster_resources: - refreshInterval: 5m - templatePath: "/etc/flyte/clusterresource/templates" - # -- Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints - standaloneDeployment: false - customData: - - production: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - - staging: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - - development: - - projectQuotaCpu: - value: "8" - - projectQuotaMemory: - value: "16Gi" - refresh: 5m -task_resources: - defaults: - cpu: 500m - memory: 1Gi - limits: - cpu: 2 - memory: 4Gi - gpu: 5 -flyte: - admin: - disableClusterResourceManager: true - disableScheduler: true -catalog-cache: - endpoint: localhost:8081 - insecure: true - type: datacatalog \ No newline at end of file diff --git a/script/setup_local_dev.sh b/script/setup_local_dev.sh index 2889738656..9fbefeb9c1 100644 --- a/script/setup_local_dev.sh +++ b/script/setup_local_dev.sh @@ -8,7 +8,7 @@ : "${K3D_INSTALL_URL:="https://raw.githubusercontent.com/rancher/k3d/main/install.sh"}" # URL to k3d installer script : "${K3S_VERSION:="v1.21.1-k3s1"}" # version of k3s to run in k3d cluster, empty value uses default specified by k3d install : "${K3D_CLUSTER_NAME:="flyte"}" # name of k3d cluster to be used -: "${K3D_KUBECONFIG_FILE_PATH:="${HOME}/.k3d/kubeconfig-${K3D_CLUSTER_NAME}.yaml"}" # file path to store kubeconfig file for k3d cluster at +: "${K3D_KUBECONFIG_FILE_PATH:="${HOME}/.flyte/sandbox/kubeconfig"}" # file path to store kubeconfig file for k3d cluster at : "${KUBECTL_VERSION:=""}" # version of kubectl to install, empty value uses latest available : "${KUBECTL_INSTALL_URL:="https://dl.k8s.io/release/VERSION/bin/linux/amd64/kubectl"}" # URL to kubectl binary, include VERSION to be replaced with 
KUBECTL_VERSION @@ -170,4 +170,4 @@ kubectl wait --for=condition=available deployment/minio deployment/postgres -n $ mkdir -p /tmp/k8s-webhook-server/serving-certs echo -e "\n*** Successfully set up local development environment." -echo -e "*** Run \"flyte start --config flyte_local_k3d.yaml\" to start your local flyte installation.\n" +echo -e "*** Run \"flyte start --config flyte-single-binary-local.yaml\" to start your local flyte installation.\n" From 6be49e820c8e1028191b6d629a2ca728eb08d5c4 Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Thu, 11 Apr 2024 03:58:55 -0400 Subject: [PATCH 10/15] Generate rust grpc using tonic (#5187) Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario --- flyteidl/buf.gen.yaml | 2 + flyteidl/gen/pb_rust/datacatalog.rs | 3 +- flyteidl/gen/pb_rust/datacatalog.tonic.rs | 999 ++ flyteidl/gen/pb_rust/flyteidl.cacheservice.rs | 3 +- .../pb_rust/flyteidl.cacheservice.tonic.rs | 619 ++ flyteidl/gen/pb_rust/flyteidl.service.rs | 3 +- .../gen/pb_rust/flyteidl.service.tonic.rs | 8850 +++++++++++++++++ 7 files changed, 10476 insertions(+), 3 deletions(-) create mode 100644 flyteidl/gen/pb_rust/datacatalog.tonic.rs create mode 100644 flyteidl/gen/pb_rust/flyteidl.cacheservice.tonic.rs create mode 100644 flyteidl/gen/pb_rust/flyteidl.service.tonic.rs diff --git a/flyteidl/buf.gen.yaml b/flyteidl/buf.gen.yaml index 8bea491bf4..903801000a 100644 --- a/flyteidl/buf.gen.yaml +++ b/flyteidl/buf.gen.yaml @@ -10,6 +10,8 @@ plugins: out: gen/pb_python - plugin: buf.build/community/neoeinstein-prost out: gen/pb_rust + - plugin: buf.build/community/neoeinstein-tonic:v0.4.0 + out: gen/pb_rust - plugin: buf.build/protocolbuffers/go:v1.30.0 out: gen/pb-go opt: diff --git a/flyteidl/gen/pb_rust/datacatalog.rs b/flyteidl/gen/pb_rust/datacatalog.rs index ac2c695cab..f181704954 100644 --- a/flyteidl/gen/pb_rust/datacatalog.rs +++ b/flyteidl/gen/pb_rust/datacatalog.rs @@ -562,4 +562,5 @@ pub mod pagination_options { } } } -// @@protoc_insertion_point(module) +include!("datacatalog.tonic.rs"); +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/flyteidl/gen/pb_rust/datacatalog.tonic.rs b/flyteidl/gen/pb_rust/datacatalog.tonic.rs new file mode 100644 index 0000000000..e07a5269c4 --- /dev/null +++ b/flyteidl/gen/pb_rust/datacatalog.tonic.rs @@ -0,0 +1,999 @@ +// @generated +/// Generated client implementations. +pub mod data_catalog_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct DataCatalogClient { + inner: tonic::client::Grpc, + } + impl DataCatalogClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DataCatalogClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DataCatalogClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DataCatalogClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn create_dataset( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/CreateDataset", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "CreateDataset")); + self.inner.unary(req, path, codec).await + } + pub async fn get_dataset( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/GetDataset", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "GetDataset")); + self.inner.unary(req, path, codec).await + } + pub async fn create_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + 
let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/CreateArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "CreateArtifact")); + self.inner.unary(req, path, codec).await + } + pub async fn get_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/GetArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "GetArtifact")); + self.inner.unary(req, path, codec).await + } + pub async fn add_tag( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/AddTag", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "AddTag")); + self.inner.unary(req, path, codec).await + } + pub async fn list_artifacts( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/ListArtifacts", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "ListArtifacts")); + self.inner.unary(req, path, codec).await + } + pub async fn list_datasets( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/ListDatasets", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "ListDatasets")); + self.inner.unary(req, path, codec).await + } + pub async fn update_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/UpdateArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "UpdateArtifact")); + self.inner.unary(req, path, codec).await + } + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + 
tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("datacatalog.DataCatalog", "GetOrExtendReservation"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("datacatalog.DataCatalog", "ReleaseReservation"), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod data_catalog_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with DataCatalogServer. + #[async_trait] + pub trait DataCatalog: Send + Sync + 'static { + async fn create_dataset( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_dataset( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn create_artifact( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_artifact( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn add_tag( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn list_artifacts( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn list_datasets( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn update_artifact( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_or_extend_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn release_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct DataCatalogServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl DataCatalogServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( 
+ inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for DataCatalogServer + where + T: DataCatalog, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/datacatalog.DataCatalog/CreateDataset" => { + #[allow(non_camel_case_types)] + struct CreateDatasetSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for CreateDatasetSvc { + type Response = super::CreateDatasetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_dataset(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateDatasetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/GetDataset" => { + #[allow(non_camel_case_types)] + struct GetDatasetSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for GetDatasetSvc { + type Response = super::GetDatasetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_dataset(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let 
max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetDatasetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/CreateArtifact" => { + #[allow(non_camel_case_types)] + struct CreateArtifactSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for CreateArtifactSvc { + type Response = super::CreateArtifactResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_artifact(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateArtifactSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/GetArtifact" => { + #[allow(non_camel_case_types)] + struct GetArtifactSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for GetArtifactSvc { + type Response = super::GetArtifactResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_artifact(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetArtifactSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/AddTag" => { + #[allow(non_camel_case_types)] + struct AddTagSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for AddTagSvc { + type Response = super::AddTagResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::add_tag(&inner, 
request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = AddTagSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/ListArtifacts" => { + #[allow(non_camel_case_types)] + struct ListArtifactsSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for ListArtifactsSvc { + type Response = super::ListArtifactsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_artifacts(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListArtifactsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/ListDatasets" => { + #[allow(non_camel_case_types)] + struct ListDatasetsSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for ListDatasetsSvc { + type Response = super::ListDatasetsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_datasets(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListDatasetsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/UpdateArtifact" => { + #[allow(non_camel_case_types)] + struct UpdateArtifactSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for 
UpdateArtifactSvc { + type Response = super::UpdateArtifactResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_artifact(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateArtifactSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/GetOrExtendReservation" => { + #[allow(non_camel_case_types)] + struct GetOrExtendReservationSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for GetOrExtendReservationSvc { + type Response = super::GetOrExtendReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_or_extend_reservation( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetOrExtendReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/datacatalog.DataCatalog/ReleaseReservation" => { + #[allow(non_camel_case_types)] + struct ReleaseReservationSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for ReleaseReservationSvc { + type Response = super::ReleaseReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::release_reservation(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ReleaseReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for DataCatalogServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for DataCatalogServer { + const NAME: &'static str = "datacatalog.DataCatalog"; + } +} diff --git a/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs b/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs index 9f2122fc4e..ff3264c633 100644 --- a/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs +++ b/flyteidl/gen/pb_rust/flyteidl.cacheservice.rs @@ -169,4 +169,5 @@ pub struct ReleaseReservationRequest { #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReleaseReservationResponse { } -// @@protoc_insertion_point(module) +include!("flyteidl.cacheservice.tonic.rs"); +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/flyteidl/gen/pb_rust/flyteidl.cacheservice.tonic.rs b/flyteidl/gen/pb_rust/flyteidl.cacheservice.tonic.rs new file mode 100644 index 0000000000..5138db427a --- /dev/null +++ b/flyteidl/gen/pb_rust/flyteidl.cacheservice.tonic.rs @@ -0,0 +1,619 @@ +// @generated +/// Generated client implementations. +pub mod cache_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct CacheServiceClient { + inner: tonic::client::Grpc, + } + impl CacheServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl CacheServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CacheServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + CacheServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn get( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/Get", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Get")); + self.inner.unary(req, path, codec).await + } + pub async fn put( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/Put", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Put")); + self.inner.unary(req, path, codec).await + } + pub async fn delete( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/Delete", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Delete")); + self.inner.unary(req, path, codec).await + } + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.cacheservice.CacheService", + "GetOrExtendReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + 
tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.cacheservice.CacheService", + "ReleaseReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod cache_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with CacheServiceServer. + #[async_trait] + pub trait CacheService: Send + Sync + 'static { + async fn get( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn put( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn delete( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_or_extend_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn release_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct CacheServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl CacheServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for CacheServiceServer + where + T: CacheService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.cacheservice.CacheService/Get" => { + #[allow(non_camel_case_types)] + struct GetSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService for GetSvc { + type Response = super::GetCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.cacheservice.CacheService/Put" => { + #[allow(non_camel_case_types)] + struct PutSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService for PutSvc { + type Response = super::PutCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::put(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = PutSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.cacheservice.CacheService/Delete" => { + #[allow(non_camel_case_types)] + struct DeleteSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for DeleteSvc { + type Response = super::DeleteCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async 
move { + ::delete(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.cacheservice.CacheService/GetOrExtendReservation" => { + #[allow(non_camel_case_types)] + struct GetOrExtendReservationSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for GetOrExtendReservationSvc { + type Response = super::GetOrExtendReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_or_extend_reservation( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetOrExtendReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.cacheservice.CacheService/ReleaseReservation" => { + #[allow(non_camel_case_types)] + struct ReleaseReservationSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for ReleaseReservationSvc { + type Response = super::ReleaseReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::release_reservation(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ReleaseReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + 
Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for CacheServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for CacheServiceServer { + const NAME: &'static str = "flyteidl.cacheservice.CacheService"; + } +} diff --git a/flyteidl/gen/pb_rust/flyteidl.service.rs b/flyteidl/gen/pb_rust/flyteidl.service.rs index b5a6b8b052..2fb065da4e 100644 --- a/flyteidl/gen/pb_rust/flyteidl.service.rs +++ b/flyteidl/gen/pb_rust/flyteidl.service.rs @@ -408,4 +408,5 @@ pub struct UserInfoResponse { #[prost(message, optional, tag="8")] pub additional_claims: ::core::option::Option<::prost_types::Struct>, } -// @@protoc_insertion_point(module) +include!("flyteidl.service.tonic.rs"); +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/flyteidl/gen/pb_rust/flyteidl.service.tonic.rs b/flyteidl/gen/pb_rust/flyteidl.service.tonic.rs new file mode 100644 index 0000000000..7fed85e2ed --- /dev/null +++ b/flyteidl/gen/pb_rust/flyteidl.service.tonic.rs @@ -0,0 +1,8850 @@ +// @generated +/// Generated client implementations. +pub mod admin_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** The following defines an RPC service that is also served over HTTP via grpc-gateway. + Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go +*/ + #[derive(Debug, Clone)] + pub struct AdminServiceClient { + inner: tonic::client::Grpc, + } + impl AdminServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AdminServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AdminServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AdminServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** Create and upload a :ref:`ref_flyteidl.admin.Task` definition +*/ + pub async fn create_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "CreateTask")); + self.inner.unary(req, path, codec).await + } + /** Fetch a :ref:`ref_flyteidl.admin.Task` definition. +*/ + pub async fn get_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetTask")); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of task objects. +*/ + pub async fn list_task_ids( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListTaskIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "ListTaskIds")); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.Task` definitions. 
+*/ + pub async fn list_tasks( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListTasks", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "ListTasks")); + self.inner.unary(req, path, codec).await + } + /** Create and upload a :ref:`ref_flyteidl.admin.Workflow` definition +*/ + pub async fn create_workflow( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateWorkflow", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateWorkflow"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a :ref:`ref_flyteidl.admin.Workflow` definition. +*/ + pub async fn get_workflow( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetWorkflow", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetWorkflow")); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of workflow objects. +*/ + pub async fn list_workflow_ids( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListWorkflowIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListWorkflowIds"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.Workflow` definitions. 
+*/ + pub async fn list_workflows( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListWorkflows", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListWorkflows"), + ); + self.inner.unary(req, path, codec).await + } + /** Create and upload a :ref:`ref_flyteidl.admin.LaunchPlan` definition +*/ + pub async fn create_launch_plan( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::LaunchPlanCreateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateLaunchPlan"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a :ref:`ref_flyteidl.admin.LaunchPlan` definition. +*/ + pub async fn get_launch_plan( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetLaunchPlan"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch the active version of a :ref:`ref_flyteidl.admin.LaunchPlan`. +*/ + pub async fn get_active_launch_plan( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ActiveLaunchPlanRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetActiveLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetActiveLaunchPlan", + ), + ); + self.inner.unary(req, path, codec).await + } + /** List active versions of :ref:`ref_flyteidl.admin.LaunchPlan`. 
+*/ + pub async fn list_active_launch_plans( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ActiveLaunchPlanListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListActiveLaunchPlans", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListActiveLaunchPlans", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of launch plan objects. +*/ + pub async fn list_launch_plan_ids( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListLaunchPlanIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListLaunchPlanIds"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.LaunchPlan` definitions. +*/ + pub async fn list_launch_plans( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListLaunchPlans", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListLaunchPlans"), + ); + self.inner.unary(req, path, codec).await + } + /** Updates the status of a registered :ref:`ref_flyteidl.admin.LaunchPlan`. 
+*/ + pub async fn update_launch_plan( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::LaunchPlanUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateLaunchPlan"), + ); + self.inner.unary(req, path, codec).await + } + /** Triggers the creation of a :ref:`ref_flyteidl.admin.Execution` +*/ + pub async fn create_execution( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Triggers the creation of an identical :ref:`ref_flyteidl.admin.Execution` +*/ + pub async fn relaunch_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ExecutionRelaunchRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/RelaunchExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "RelaunchExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Recreates a previously-run workflow execution that will only start executing from the last known failure point. + In Recover mode, users cannot change any input parameters or update the version of the execution. + This is extremely useful to recover from system errors and byzantine faults like - Loss of K8s cluster, bugs in platform or instability, machine failures, + downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again. + See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details. 
+*/ + pub async fn recover_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ExecutionRecoverRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/RecoverExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "RecoverExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches a :ref:`ref_flyteidl.admin.Execution`. +*/ + pub async fn get_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Update execution belonging to project domain :ref:`ref_flyteidl.admin.Execution`. +*/ + pub async fn update_execution( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches input and output data for a :ref:`ref_flyteidl.admin.Execution`. +*/ + pub async fn get_execution_data( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionGetDataRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetExecutionData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetExecutionData"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.Execution`. 
+*/ + pub async fn list_executions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListExecutions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListExecutions"), + ); + self.inner.unary(req, path, codec).await + } + /** Terminates an in-progress :ref:`ref_flyteidl.admin.Execution`. +*/ + pub async fn terminate_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ExecutionTerminateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/TerminateExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "TerminateExecution", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches a :ref:`ref_flyteidl.admin.NodeExecution`. +*/ + pub async fn get_node_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetNodeExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetNodeExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches a :ref:`ref_flyteidl.admin.DynamicNodeWorkflowResponse`. +*/ + pub async fn get_dynamic_node_workflow( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::GetDynamicNodeWorkflowRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetDynamicNodeWorkflow", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetDynamicNodeWorkflow", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution`. 
+*/ + pub async fn list_node_executions( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListNodeExecutions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListNodeExecutions", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution` launched by the reference :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + pub async fn list_node_executions_for_task( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionForTaskListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListNodeExecutionsForTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListNodeExecutionsForTask", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches input and output data for a :ref:`ref_flyteidl.admin.NodeExecution`. +*/ + pub async fn get_node_execution_data( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionGetDataRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetNodeExecutionData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetNodeExecutionData", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Registers a :ref:`ref_flyteidl.admin.Project` with the Flyte deployment. +*/ + pub async fn register_project( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/RegisterProject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "RegisterProject"), + ); + self.inner.unary(req, path, codec).await + } + /** Updates an existing :ref:`ref_flyteidl.admin.Project` + flyteidl.admin.Project should be passed but the domains property should be empty; + it will be ignored in the handler as domains cannot be updated via this API. 
+*/ + pub async fn update_project( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateProject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateProject"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches a :ref:`ref_flyteidl.admin.Project` +*/ + pub async fn get_project( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetProject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetProject")); + self.inner.unary(req, path, codec).await + } + /** Fetches a list of :ref:`ref_flyteidl.admin.Project` +*/ + pub async fn list_projects( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListProjects", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListProjects"), + ); + self.inner.unary(req, path, codec).await + } + /** Indicates a :ref:`ref_flyteidl.event.WorkflowExecutionEvent` has occurred. +*/ + pub async fn create_workflow_event( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionEventRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateWorkflowEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "CreateWorkflowEvent", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Indicates a :ref:`ref_flyteidl.event.NodeExecutionEvent` has occurred. 
+*/ + pub async fn create_node_event( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionEventRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateNodeEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateNodeEvent"), + ); + self.inner.unary(req, path, codec).await + } + /** Indicates a :ref:`ref_flyteidl.event.TaskExecutionEvent` has occurred. +*/ + pub async fn create_task_event( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionEventRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateTaskEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateTaskEvent"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches a :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + pub async fn get_task_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetTaskExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetTaskExecution"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches a list of :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + pub async fn list_task_executions( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListTaskExecutions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListTaskExecutions", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches input and output data for a :ref:`ref_flyteidl.admin.TaskExecution`. 
+*/ + pub async fn get_task_execution_data( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionGetDataRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetTaskExecutionData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetTaskExecutionData", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + pub async fn update_project_domain_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectDomainAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateProjectDomainAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "UpdateProjectDomainAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + pub async fn get_project_domain_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectDomainAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetProjectDomainAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetProjectDomainAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. 
+*/ + pub async fn delete_project_domain_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectDomainAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/DeleteProjectDomainAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "DeleteProjectDomainAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level +*/ + pub async fn update_project_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateProjectAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "UpdateProjectAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + pub async fn get_project_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetProjectAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetProjectAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + pub async fn delete_project_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/DeleteProjectAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "DeleteProjectAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. 
+*/ + pub async fn update_workflow_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateWorkflowAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "UpdateWorkflowAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. +*/ + pub async fn get_workflow_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetWorkflowAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetWorkflowAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. +*/ + pub async fn delete_workflow_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/DeleteWorkflowAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "DeleteWorkflowAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Lists custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a specific resource type. +*/ + pub async fn list_matchable_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ListMatchableAttributesRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListMatchableAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListMatchableAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Returns a list of :ref:`ref_flyteidl.admin.NamedEntity` objects. 
+*/ + pub async fn list_named_entities( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListNamedEntities", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListNamedEntities"), + ); + self.inner.unary(req, path, codec).await + } + /** Returns a :ref:`ref_flyteidl.admin.NamedEntity` object. +*/ + pub async fn get_named_entity( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetNamedEntity", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetNamedEntity"), + ); + self.inner.unary(req, path, codec).await + } + /** Updates a :ref:`ref_flyteidl.admin.NamedEntity` object. +*/ + pub async fn update_named_entity( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateNamedEntity", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateNamedEntity"), + ); + self.inner.unary(req, path, codec).await + } + /// + pub async fn get_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetVersion", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetVersion")); + self.inner.unary(req, path, codec).await + } + /** Fetch a :ref:`ref_flyteidl.admin.DescriptionEntity` object. 
+*/ + pub async fn get_description_entity( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetDescriptionEntity", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetDescriptionEntity", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.DescriptionEntity` definitions. +*/ + pub async fn list_description_entities( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::DescriptionEntityListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListDescriptionEntities", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListDescriptionEntities", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetches runtime metrics for a :ref:`ref_flyteidl.admin.Execution`. +*/ + pub async fn get_execution_metrics( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionGetMetricsRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetExecutionMetrics", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetExecutionMetrics", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod admin_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AdminServiceServer. + #[async_trait] + pub trait AdminService: Send + Sync + 'static { + /** Create and upload a :ref:`ref_flyteidl.admin.Task` definition +*/ + async fn create_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a :ref:`ref_flyteidl.admin.Task` definition. +*/ + async fn get_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of task objects. +*/ + async fn list_task_ids( + &self, + request: tonic::Request< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.Task` definitions. 
+*/ + async fn list_tasks( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Create and upload a :ref:`ref_flyteidl.admin.Workflow` definition +*/ + async fn create_workflow( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a :ref:`ref_flyteidl.admin.Workflow` definition. +*/ + async fn get_workflow( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of workflow objects. +*/ + async fn list_workflow_ids( + &self, + request: tonic::Request< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.Workflow` definitions. +*/ + async fn list_workflows( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Create and upload a :ref:`ref_flyteidl.admin.LaunchPlan` definition +*/ + async fn create_launch_plan( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a :ref:`ref_flyteidl.admin.LaunchPlan` definition. +*/ + async fn get_launch_plan( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch the active version of a :ref:`ref_flyteidl.admin.LaunchPlan`. +*/ + async fn get_active_launch_plan( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** List active versions of :ref:`ref_flyteidl.admin.LaunchPlan`. +*/ + async fn list_active_launch_plans( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of launch plan objects. +*/ + async fn list_launch_plan_ids( + &self, + request: tonic::Request< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.LaunchPlan` definitions. +*/ + async fn list_launch_plans( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Updates the status of a registered :ref:`ref_flyteidl.admin.LaunchPlan`. +*/ + async fn update_launch_plan( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Triggers the creation of a :ref:`ref_flyteidl.admin.Execution` +*/ + async fn create_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Triggers the creation of an identical :ref:`ref_flyteidl.admin.Execution` +*/ + async fn relaunch_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Recreates a previously-run workflow execution that will only start executing from the last known failure point. + In Recover mode, users cannot change any input parameters or update the version of the execution. 
+ This is extremely useful to recover from system errors and byzantine faults like - Loss of K8s cluster, bugs in platform or instability, machine failures, + downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again. + See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details. +*/ + async fn recover_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a :ref:`ref_flyteidl.admin.Execution`. +*/ + async fn get_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Update execution belonging to project domain :ref:`ref_flyteidl.admin.Execution`. +*/ + async fn update_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches input and output data for a :ref:`ref_flyteidl.admin.Execution`. +*/ + async fn get_execution_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.Execution`. +*/ + async fn list_executions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Terminates an in-progress :ref:`ref_flyteidl.admin.Execution`. +*/ + async fn terminate_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a :ref:`ref_flyteidl.admin.NodeExecution`. +*/ + async fn get_node_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a :ref:`ref_flyteidl.admin.DynamicNodeWorkflowResponse`. +*/ + async fn get_dynamic_node_workflow( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution`. +*/ + async fn list_node_executions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution` launched by the reference :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + async fn list_node_executions_for_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches input and output data for a :ref:`ref_flyteidl.admin.NodeExecution`. +*/ + async fn get_node_execution_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Registers a :ref:`ref_flyteidl.admin.Project` with the Flyte deployment. +*/ + async fn register_project( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Updates an existing :ref:`ref_flyteidl.admin.Project` + flyteidl.admin.Project should be passed but the domains property should be empty; + it will be ignored in the handler as domains cannot be updated via this API. 
+*/ + async fn update_project( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a :ref:`ref_flyteidl.admin.Project` +*/ + async fn get_project( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a list of :ref:`ref_flyteidl.admin.Project` +*/ + async fn list_projects( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Indicates a :ref:`ref_flyteidl.event.WorkflowExecutionEvent` has occurred. +*/ + async fn create_workflow_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Indicates a :ref:`ref_flyteidl.event.NodeExecutionEvent` has occurred. +*/ + async fn create_node_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Indicates a :ref:`ref_flyteidl.event.TaskExecutionEvent` has occurred. +*/ + async fn create_task_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + async fn get_task_execution( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches a list of :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + async fn list_task_executions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches input and output data for a :ref:`ref_flyteidl.admin.TaskExecution`. +*/ + async fn get_task_execution_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + async fn update_project_domain_attributes( + &self, + request: tonic::Request< + super::super::admin::ProjectDomainAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + async fn get_project_domain_attributes( + &self, + request: tonic::Request< + super::super::admin::ProjectDomainAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + async fn delete_project_domain_attributes( + &self, + request: tonic::Request< + super::super::admin::ProjectDomainAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level +*/ + async fn update_project_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. +*/ + async fn get_project_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. 
+*/ + async fn delete_project_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. +*/ + async fn update_workflow_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. +*/ + async fn get_workflow_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. +*/ + async fn delete_workflow_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Lists custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a specific resource type. +*/ + async fn list_matchable_attributes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Returns a list of :ref:`ref_flyteidl.admin.NamedEntity` objects. +*/ + async fn list_named_entities( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Returns a :ref:`ref_flyteidl.admin.NamedEntity` object. +*/ + async fn get_named_entity( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Updates a :ref:`ref_flyteidl.admin.NamedEntity` object. +*/ + async fn update_named_entity( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// + async fn get_version( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a :ref:`ref_flyteidl.admin.DescriptionEntity` object. +*/ + async fn get_description_entity( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.DescriptionEntity` definitions. +*/ + async fn list_description_entities( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetches runtime metrics for a :ref:`ref_flyteidl.admin.Execution`. +*/ + async fn get_execution_metrics( + &self, + request: tonic::Request< + super::super::admin::WorkflowExecutionGetMetricsRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /** The following defines an RPC service that is also served over HTTP via grpc-gateway. 
+ Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go +*/ + #[derive(Debug)] + pub struct AdminServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AdminServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for AdminServiceServer + where + T: AdminService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.AdminService/CreateTask" => { + #[allow(non_camel_case_types)] + struct CreateTaskSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for CreateTaskSvc { + type Response = super::super::admin::TaskCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::TaskCreateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_task(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + 
max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetTask" => { + #[allow(non_camel_case_types)] + struct GetTaskSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for GetTaskSvc { + type Response = super::super::admin::Task; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ObjectGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListTaskIds" => { + #[allow(non_camel_case_types)] + struct ListTaskIdsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NamedEntityIdentifierListRequest, + > for ListTaskIdsSvc { + type Response = super::super::admin::NamedEntityIdentifierList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_task_ids(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListTaskIdsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListTasks" => { + #[allow(non_camel_case_types)] + struct ListTasksSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ResourceListRequest, + > for ListTasksSvc { + type Response = super::super::admin::TaskList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ResourceListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_tasks(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let 
send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListTasksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/CreateWorkflow" => { + #[allow(non_camel_case_types)] + struct CreateWorkflowSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowCreateRequest, + > for CreateWorkflowSvc { + type Response = super::super::admin::WorkflowCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowCreateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_workflow(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateWorkflowSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetWorkflow" => { + #[allow(non_camel_case_types)] + struct GetWorkflowSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for GetWorkflowSvc { + type Response = super::super::admin::Workflow; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ObjectGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_workflow(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetWorkflowSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListWorkflowIds" => { + #[allow(non_camel_case_types)] + struct ListWorkflowIdsSvc(pub Arc); + 
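+                    // Each RPC gets a small shim struct like this one: it wraps the Arc'd
+                    // service implementation and implements tonic::server::UnaryService for
+                    // the method's request type, forwarding the call to the corresponding
+                    // AdminService trait method.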
impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NamedEntityIdentifierListRequest, + > for ListWorkflowIdsSvc { + type Response = super::super::admin::NamedEntityIdentifierList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_workflow_ids(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListWorkflowIdsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListWorkflows" => { + #[allow(non_camel_case_types)] + struct ListWorkflowsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ResourceListRequest, + > for ListWorkflowsSvc { + type Response = super::super::admin::WorkflowList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ResourceListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_workflows(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListWorkflowsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/CreateLaunchPlan" => { + #[allow(non_camel_case_types)] + struct CreateLaunchPlanSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::LaunchPlanCreateRequest, + > for CreateLaunchPlanSvc { + type Response = super::super::admin::LaunchPlanCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::LaunchPlanCreateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_launch_plan(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = 
self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateLaunchPlanSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetLaunchPlan" => { + #[allow(non_camel_case_types)] + struct GetLaunchPlanSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for GetLaunchPlanSvc { + type Response = super::super::admin::LaunchPlan; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ObjectGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_launch_plan(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetLaunchPlanSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetActiveLaunchPlan" => { + #[allow(non_camel_case_types)] + struct GetActiveLaunchPlanSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ActiveLaunchPlanRequest, + > for GetActiveLaunchPlanSvc { + type Response = super::super::admin::LaunchPlan; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ActiveLaunchPlanRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_active_launch_plan(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetActiveLaunchPlanSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListActiveLaunchPlans" => { + #[allow(non_camel_case_types)] + struct ListActiveLaunchPlansSvc(pub Arc); + impl< + T: AdminService, + > 
tonic::server::UnaryService< + super::super::admin::ActiveLaunchPlanListRequest, + > for ListActiveLaunchPlansSvc { + type Response = super::super::admin::LaunchPlanList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ActiveLaunchPlanListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_active_launch_plans( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListActiveLaunchPlansSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListLaunchPlanIds" => { + #[allow(non_camel_case_types)] + struct ListLaunchPlanIdsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NamedEntityIdentifierListRequest, + > for ListLaunchPlanIdsSvc { + type Response = super::super::admin::NamedEntityIdentifierList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_launch_plan_ids(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListLaunchPlanIdsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListLaunchPlans" => { + #[allow(non_camel_case_types)] + struct ListLaunchPlansSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ResourceListRequest, + > for ListLaunchPlansSvc { + type Response = super::super::admin::LaunchPlanList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ResourceListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_launch_plans(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let 
max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListLaunchPlansSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateLaunchPlan" => { + #[allow(non_camel_case_types)] + struct UpdateLaunchPlanSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::LaunchPlanUpdateRequest, + > for UpdateLaunchPlanSvc { + type Response = super::super::admin::LaunchPlanUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::LaunchPlanUpdateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_launch_plan(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateLaunchPlanSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/CreateExecution" => { + #[allow(non_camel_case_types)] + struct CreateExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ExecutionCreateRequest, + > for CreateExecutionSvc { + type Response = super::super::admin::ExecutionCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ExecutionCreateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_execution(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/RelaunchExecution" => { + 
#[allow(non_camel_case_types)] + struct RelaunchExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ExecutionRelaunchRequest, + > for RelaunchExecutionSvc { + type Response = super::super::admin::ExecutionCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ExecutionRelaunchRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::relaunch_execution(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = RelaunchExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/RecoverExecution" => { + #[allow(non_camel_case_types)] + struct RecoverExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ExecutionRecoverRequest, + > for RecoverExecutionSvc { + type Response = super::super::admin::ExecutionCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ExecutionRecoverRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::recover_execution(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = RecoverExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetExecution" => { + #[allow(non_camel_case_types)] + struct GetExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowExecutionGetRequest, + > for GetExecutionSvc { + type Response = super::super::admin::Execution; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowExecutionGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_execution(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = 
self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateExecution" => { + #[allow(non_camel_case_types)] + struct UpdateExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ExecutionUpdateRequest, + > for UpdateExecutionSvc { + type Response = super::super::admin::ExecutionUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ExecutionUpdateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_execution(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetExecutionData" => { + #[allow(non_camel_case_types)] + struct GetExecutionDataSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowExecutionGetDataRequest, + > for GetExecutionDataSvc { + type Response = super::super::admin::WorkflowExecutionGetDataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowExecutionGetDataRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_execution_data(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetExecutionDataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + 
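+                // Each match arm follows the same shape: build the per-method shim, wrap it
+                // in a ProstCodec-backed tonic::server::Grpc handler, apply the configured
+                // compression and message-size limits, and run the unary call.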
"/flyteidl.service.AdminService/ListExecutions" => { + #[allow(non_camel_case_types)] + struct ListExecutionsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ResourceListRequest, + > for ListExecutionsSvc { + type Response = super::super::admin::ExecutionList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ResourceListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_executions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListExecutionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/TerminateExecution" => { + #[allow(non_camel_case_types)] + struct TerminateExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ExecutionTerminateRequest, + > for TerminateExecutionSvc { + type Response = super::super::admin::ExecutionTerminateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ExecutionTerminateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::terminate_execution(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TerminateExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetNodeExecution" => { + #[allow(non_camel_case_types)] + struct GetNodeExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NodeExecutionGetRequest, + > for GetNodeExecutionSvc { + type Response = super::super::admin::NodeExecution; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NodeExecutionGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_node_execution(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = 
self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetNodeExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetDynamicNodeWorkflow" => { + #[allow(non_camel_case_types)] + struct GetDynamicNodeWorkflowSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::GetDynamicNodeWorkflowRequest, + > for GetDynamicNodeWorkflowSvc { + type Response = super::super::admin::DynamicNodeWorkflowResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::GetDynamicNodeWorkflowRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_dynamic_node_workflow( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetDynamicNodeWorkflowSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListNodeExecutions" => { + #[allow(non_camel_case_types)] + struct ListNodeExecutionsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NodeExecutionListRequest, + > for ListNodeExecutionsSvc { + type Response = super::super::admin::NodeExecutionList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NodeExecutionListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_node_executions(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListNodeExecutionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + 
max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListNodeExecutionsForTask" => { + #[allow(non_camel_case_types)] + struct ListNodeExecutionsForTaskSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NodeExecutionForTaskListRequest, + > for ListNodeExecutionsForTaskSvc { + type Response = super::super::admin::NodeExecutionList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NodeExecutionForTaskListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_node_executions_for_task( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListNodeExecutionsForTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetNodeExecutionData" => { + #[allow(non_camel_case_types)] + struct GetNodeExecutionDataSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NodeExecutionGetDataRequest, + > for GetNodeExecutionDataSvc { + type Response = super::super::admin::NodeExecutionGetDataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NodeExecutionGetDataRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_node_execution_data( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetNodeExecutionDataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/RegisterProject" => { + #[allow(non_camel_case_types)] + struct RegisterProjectSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectRegisterRequest, + > for RegisterProjectSvc { + type Response = super::super::admin::ProjectRegisterResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + 
super::super::admin::ProjectRegisterRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::register_project(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = RegisterProjectSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateProject" => { + #[allow(non_camel_case_types)] + struct UpdateProjectSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for UpdateProjectSvc { + type Response = super::super::admin::ProjectUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_project(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateProjectSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetProject" => { + #[allow(non_camel_case_types)] + struct GetProjectSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for GetProjectSvc { + type Response = super::super::admin::Project; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_project(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetProjectSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, 
req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListProjects" => { + #[allow(non_camel_case_types)] + struct ListProjectsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectListRequest, + > for ListProjectsSvc { + type Response = super::super::admin::Projects; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_projects(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListProjectsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/CreateWorkflowEvent" => { + #[allow(non_camel_case_types)] + struct CreateWorkflowEventSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowExecutionEventRequest, + > for CreateWorkflowEventSvc { + type Response = super::super::admin::WorkflowExecutionEventResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowExecutionEventRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_workflow_event(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateWorkflowEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/CreateNodeEvent" => { + #[allow(non_camel_case_types)] + struct CreateNodeEventSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NodeExecutionEventRequest, + > for CreateNodeEventSvc { + type Response = super::super::admin::NodeExecutionEventResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NodeExecutionEventRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_node_event(&inner, request) + .await + }; + Box::pin(fut) + } 
+ } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateNodeEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/CreateTaskEvent" => { + #[allow(non_camel_case_types)] + struct CreateTaskEventSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::TaskExecutionEventRequest, + > for CreateTaskEventSvc { + type Response = super::super::admin::TaskExecutionEventResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::TaskExecutionEventRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_task_event(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateTaskEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetTaskExecution" => { + #[allow(non_camel_case_types)] + struct GetTaskExecutionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::TaskExecutionGetRequest, + > for GetTaskExecutionSvc { + type Response = super::super::admin::TaskExecution; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::TaskExecutionGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task_execution(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTaskExecutionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res 
= grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListTaskExecutions" => { + #[allow(non_camel_case_types)] + struct ListTaskExecutionsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::TaskExecutionListRequest, + > for ListTaskExecutionsSvc { + type Response = super::super::admin::TaskExecutionList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::TaskExecutionListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_task_executions(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListTaskExecutionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetTaskExecutionData" => { + #[allow(non_camel_case_types)] + struct GetTaskExecutionDataSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::TaskExecutionGetDataRequest, + > for GetTaskExecutionDataSvc { + type Response = super::super::admin::TaskExecutionGetDataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::TaskExecutionGetDataRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task_execution_data( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTaskExecutionDataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateProjectDomainAttributes" => { + #[allow(non_camel_case_types)] + struct UpdateProjectDomainAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectDomainAttributesUpdateRequest, + > for UpdateProjectDomainAttributesSvc { + type Response = super::super::admin::ProjectDomainAttributesUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectDomainAttributesUpdateRequest, 
+ >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_project_domain_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateProjectDomainAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetProjectDomainAttributes" => { + #[allow(non_camel_case_types)] + struct GetProjectDomainAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectDomainAttributesGetRequest, + > for GetProjectDomainAttributesSvc { + type Response = super::super::admin::ProjectDomainAttributesGetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectDomainAttributesGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_project_domain_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetProjectDomainAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/DeleteProjectDomainAttributes" => { + #[allow(non_camel_case_types)] + struct DeleteProjectDomainAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectDomainAttributesDeleteRequest, + > for DeleteProjectDomainAttributesSvc { + type Response = super::super::admin::ProjectDomainAttributesDeleteResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectDomainAttributesDeleteRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_project_domain_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = 
self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteProjectDomainAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateProjectAttributes" => { + #[allow(non_camel_case_types)] + struct UpdateProjectAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectAttributesUpdateRequest, + > for UpdateProjectAttributesSvc { + type Response = super::super::admin::ProjectAttributesUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectAttributesUpdateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_project_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateProjectAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetProjectAttributes" => { + #[allow(non_camel_case_types)] + struct GetProjectAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectAttributesGetRequest, + > for GetProjectAttributesSvc { + type Response = super::super::admin::ProjectAttributesGetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectAttributesGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_project_attributes(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetProjectAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/DeleteProjectAttributes" => { + #[allow(non_camel_case_types)] + 
struct DeleteProjectAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ProjectAttributesDeleteRequest, + > for DeleteProjectAttributesSvc { + type Response = super::super::admin::ProjectAttributesDeleteResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ProjectAttributesDeleteRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_project_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteProjectAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateWorkflowAttributes" => { + #[allow(non_camel_case_types)] + struct UpdateWorkflowAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowAttributesUpdateRequest, + > for UpdateWorkflowAttributesSvc { + type Response = super::super::admin::WorkflowAttributesUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowAttributesUpdateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_workflow_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateWorkflowAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetWorkflowAttributes" => { + #[allow(non_camel_case_types)] + struct GetWorkflowAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowAttributesGetRequest, + > for GetWorkflowAttributesSvc { + type Response = super::super::admin::WorkflowAttributesGetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowAttributesGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_workflow_attributes( + 
&inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetWorkflowAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/DeleteWorkflowAttributes" => { + #[allow(non_camel_case_types)] + struct DeleteWorkflowAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowAttributesDeleteRequest, + > for DeleteWorkflowAttributesSvc { + type Response = super::super::admin::WorkflowAttributesDeleteResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowAttributesDeleteRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_workflow_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteWorkflowAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListMatchableAttributes" => { + #[allow(non_camel_case_types)] + struct ListMatchableAttributesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::ListMatchableAttributesRequest, + > for ListMatchableAttributesSvc { + type Response = super::super::admin::ListMatchableAttributesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ListMatchableAttributesRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_matchable_attributes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListMatchableAttributesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListNamedEntities" => { + #[allow(non_camel_case_types)] + struct ListNamedEntitiesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NamedEntityListRequest, + > for ListNamedEntitiesSvc { + type Response = super::super::admin::NamedEntityList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NamedEntityListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_named_entities(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListNamedEntitiesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetNamedEntity" => { + #[allow(non_camel_case_types)] + struct GetNamedEntitySvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NamedEntityGetRequest, + > for GetNamedEntitySvc { + type Response = super::super::admin::NamedEntity; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::NamedEntityGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_named_entity(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetNamedEntitySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/UpdateNamedEntity" => { + #[allow(non_camel_case_types)] + struct UpdateNamedEntitySvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::NamedEntityUpdateRequest, + > for UpdateNamedEntitySvc { + type Response = super::super::admin::NamedEntityUpdateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut 
self, + request: tonic::Request< + super::super::admin::NamedEntityUpdateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_named_entity(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateNamedEntitySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetVersion" => { + #[allow(non_camel_case_types)] + struct GetVersionSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for GetVersionSvc { + type Response = super::super::admin::GetVersionResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::GetVersionRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_version(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetDescriptionEntity" => { + #[allow(non_camel_case_types)] + struct GetDescriptionEntitySvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService + for GetDescriptionEntitySvc { + type Response = super::super::admin::DescriptionEntity; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ObjectGetRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_description_entity(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetDescriptionEntitySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + 
send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/ListDescriptionEntities" => { + #[allow(non_camel_case_types)] + struct ListDescriptionEntitiesSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::DescriptionEntityListRequest, + > for ListDescriptionEntitiesSvc { + type Response = super::super::admin::DescriptionEntityList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::DescriptionEntityListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_description_entities( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListDescriptionEntitiesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AdminService/GetExecutionMetrics" => { + #[allow(non_camel_case_types)] + struct GetExecutionMetricsSvc(pub Arc); + impl< + T: AdminService, + > tonic::server::UnaryService< + super::super::admin::WorkflowExecutionGetMetricsRequest, + > for GetExecutionMetricsSvc { + type Response = super::super::admin::WorkflowExecutionGetMetricsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::WorkflowExecutionGetMetricsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_execution_metrics(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetExecutionMetricsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for AdminServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: 
self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for AdminServiceServer { + const NAME: &'static str = "flyteidl.service.AdminService"; + } +} +/// Generated client implementations. +pub mod sync_agent_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct SyncAgentServiceClient { + inner: tonic::client::Grpc, + } + impl SyncAgentServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SyncAgentServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SyncAgentServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + SyncAgentServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** ExecuteTaskSync streams the create request and inputs to the agent service and streams the outputs back. 
+*/ + pub async fn execute_task_sync( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::super::admin::ExecuteTaskSyncRequest, + >, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.SyncAgentService/ExecuteTaskSync", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.SyncAgentService", + "ExecuteTaskSync", + ), + ); + self.inner.streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod sync_agent_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with SyncAgentServiceServer. + #[async_trait] + pub trait SyncAgentService: Send + Sync + 'static { + /// Server streaming response type for the ExecuteTaskSync method. + type ExecuteTaskSyncStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result< + super::super::admin::ExecuteTaskSyncResponse, + tonic::Status, + >, + > + + Send + + 'static; + /** ExecuteTaskSync streams the create request and inputs to the agent service and streams the outputs back. +*/ + async fn execute_task_sync( + &self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct SyncAgentServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl SyncAgentServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for SyncAgentServiceServer + where + T: SyncAgentService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.SyncAgentService/ExecuteTaskSync" => { + #[allow(non_camel_case_types)] + struct ExecuteTaskSyncSvc(pub Arc); + impl< + T: SyncAgentService, + > tonic::server::StreamingService< + super::super::admin::ExecuteTaskSyncRequest, + > for ExecuteTaskSyncSvc { + type Response = super::super::admin::ExecuteTaskSyncResponse; + type ResponseStream = T::ExecuteTaskSyncStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming< + super::super::admin::ExecuteTaskSyncRequest, + >, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::execute_task_sync(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExecuteTaskSyncSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for SyncAgentServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for SyncAgentServiceServer { + const NAME: &'static str = "flyteidl.service.SyncAgentService"; + } +} +/// Generated client implementations. +pub mod async_agent_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** AsyncAgentService defines an RPC Service that allows propeller to send the request to the agent server asynchronously. 
+*/ + #[derive(Debug, Clone)] + pub struct AsyncAgentServiceClient { + inner: tonic::client::Grpc, + } + impl AsyncAgentServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AsyncAgentServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AsyncAgentServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AsyncAgentServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** CreateTask sends a task create request to the agent service. +*/ + pub async fn create_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/CreateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "CreateTask"), + ); + self.inner.unary(req, path, codec).await + } + /** Get job status. 
+*/ + pub async fn get_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/GetTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "GetTask"), + ); + self.inner.unary(req, path, codec).await + } + /** Delete the task resource. +*/ + pub async fn delete_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/DeleteTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "DeleteTask"), + ); + self.inner.unary(req, path, codec).await + } + /** GetTaskMetrics returns one or more task execution metrics, if available. + + Errors include + * OutOfRange if metrics are not available for the specified task time range + * various other errors +*/ + pub async fn get_task_metrics( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/GetTaskMetrics", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AsyncAgentService", + "GetTaskMetrics", + ), + ); + self.inner.unary(req, path, codec).await + } + /** GetTaskLogs returns task execution logs, if available. +*/ + pub async fn get_task_logs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/GetTaskLogs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "GetTaskLogs"), + ); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod async_agent_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AsyncAgentServiceServer. + #[async_trait] + pub trait AsyncAgentService: Send + Sync + 'static { + /** CreateTask sends a task create request to the agent service. 
+*/ + async fn create_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Get job status. +*/ + async fn get_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Delete the task resource. +*/ + async fn delete_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** GetTaskMetrics returns one or more task execution metrics, if available. + + Errors include + * OutOfRange if metrics are not available for the specified task time range + * various other errors +*/ + async fn get_task_metrics( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetTaskLogs method. + type GetTaskLogsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result< + super::super::admin::GetTaskLogsResponse, + tonic::Status, + >, + > + + Send + + 'static; + /** GetTaskLogs returns task execution logs, if available. +*/ + async fn get_task_logs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /** AsyncAgentService defines an RPC Service that allows propeller to send the request to the agent server asynchronously. +*/ + #[derive(Debug)] + pub struct AsyncAgentServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AsyncAgentServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for AsyncAgentServiceServer + where + T: AsyncAgentService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.AsyncAgentService/CreateTask" => { + #[allow(non_camel_case_types)] + struct CreateTaskSvc(pub Arc); + impl< + T: AsyncAgentService, + > tonic::server::UnaryService + for CreateTaskSvc { + type Response = super::super::admin::CreateTaskResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::CreateTaskRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_task(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AsyncAgentService/GetTask" => { + #[allow(non_camel_case_types)] + struct GetTaskSvc(pub Arc); + impl< + T: AsyncAgentService, + > tonic::server::UnaryService + for GetTaskSvc { + type Response = super::super::admin::GetTaskResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AsyncAgentService/DeleteTask" => { + #[allow(non_camel_case_types)] + struct DeleteTaskSvc(pub Arc); + impl< + T: AsyncAgentService, + > tonic::server::UnaryService + for DeleteTaskSvc { + type Response = super::super::admin::DeleteTaskResponse; + type 
Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::DeleteTaskRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_task(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AsyncAgentService/GetTaskMetrics" => { + #[allow(non_camel_case_types)] + struct GetTaskMetricsSvc(pub Arc); + impl< + T: AsyncAgentService, + > tonic::server::UnaryService< + super::super::admin::GetTaskMetricsRequest, + > for GetTaskMetricsSvc { + type Response = super::super::admin::GetTaskMetricsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::GetTaskMetricsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task_metrics(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTaskMetricsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AsyncAgentService/GetTaskLogs" => { + #[allow(non_camel_case_types)] + struct GetTaskLogsSvc(pub Arc); + impl< + T: AsyncAgentService, + > tonic::server::ServerStreamingService< + super::super::admin::GetTaskLogsRequest, + > for GetTaskLogsSvc { + type Response = super::super::admin::GetTaskLogsResponse; + type ResponseStream = T::GetTaskLogsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::GetTaskLogsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task_logs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let 
method = GetTaskLogsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for AsyncAgentServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for AsyncAgentServiceServer { + const NAME: &'static str = "flyteidl.service.AsyncAgentService"; + } +} +/// Generated client implementations. +pub mod agent_metadata_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** AgentMetadataService defines an RPC service that is also served over HTTP via grpc-gateway. + This service allows propeller or users to get the metadata of agents. +*/ + #[derive(Debug, Clone)] + pub struct AgentMetadataServiceClient { + inner: tonic::client::Grpc, + } + impl AgentMetadataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AgentMetadataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AgentMetadataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AgentMetadataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** Fetch a :ref:`ref_flyteidl.admin.Agent` definition. +*/ + pub async fn get_agent( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AgentMetadataService/GetAgent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AgentMetadataService", "GetAgent"), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.Agent` definitions. +*/ + pub async fn list_agents( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AgentMetadataService/ListAgents", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AgentMetadataService", + "ListAgents", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod agent_metadata_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AgentMetadataServiceServer. + #[async_trait] + pub trait AgentMetadataService: Send + Sync + 'static { + /** Fetch a :ref:`ref_flyteidl.admin.Agent` definition. +*/ + async fn get_agent( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.Agent` definitions. +*/ + async fn list_agents( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /** AgentMetadataService defines an RPC service that is also served over HTTP via grpc-gateway. + This service allows propeller or users to get the metadata of agents. 
+*/ + #[derive(Debug)] + pub struct AgentMetadataServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AgentMetadataServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> + for AgentMetadataServiceServer + where + T: AgentMetadataService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.AgentMetadataService/GetAgent" => { + #[allow(non_camel_case_types)] + struct GetAgentSvc(pub Arc); + impl< + T: AgentMetadataService, + > tonic::server::UnaryService + for GetAgentSvc { + type Response = super::super::admin::GetAgentResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_agent(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetAgentSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + 
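+                    // Descriptive note on the generated dispatch code (annotation, not behavior-changing):
+                    // every arm in this match follows the same tonic pattern — wrap the service
+                    // implementation in a per-method `UnaryService` adapter, configure the Prost codec
+                    // with the negotiated compression and message-size limits via
+                    // `apply_compression_config` / `apply_max_message_size_config`, then run the HTTP
+                    // request through `grpc.unary(...)`. Unmatched paths fall through to the final `_`
+                    // arm, which replies with `grpc-status: 12` (Unimplemented).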
"/flyteidl.service.AgentMetadataService/ListAgents" => { + #[allow(non_camel_case_types)] + struct ListAgentsSvc(pub Arc); + impl< + T: AgentMetadataService, + > tonic::server::UnaryService + for ListAgentsSvc { + type Response = super::super::admin::ListAgentsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::ListAgentsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_agents(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListAgentsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for AgentMetadataServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for AgentMetadataServiceServer { + const NAME: &'static str = "flyteidl.service.AgentMetadataService"; + } +} +/// Generated client implementations. +pub mod auth_metadata_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** The following defines an RPC service that is also served over HTTP via grpc-gateway. + Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go + RPCs defined in this service must be anonymously accessible. +*/ + #[derive(Debug, Clone)] + pub struct AuthMetadataServiceClient { + inner: tonic::client::Grpc, + } + impl AuthMetadataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AuthMetadataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AuthMetadataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AuthMetadataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** Anonymously accessible. Retrieves local or external oauth authorization server metadata. +*/ + pub async fn get_o_auth2_metadata( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AuthMetadataService/GetOAuth2Metadata", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AuthMetadataService", + "GetOAuth2Metadata", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Anonymously accessible. Retrieves the client information clients should use when initiating OAuth2 authorization + requests. 
+*/ + pub async fn get_public_client_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AuthMetadataService/GetPublicClientConfig", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AuthMetadataService", + "GetPublicClientConfig", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod auth_metadata_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AuthMetadataServiceServer. + #[async_trait] + pub trait AuthMetadataService: Send + Sync + 'static { + /** Anonymously accessible. Retrieves local or external oauth authorization server metadata. +*/ + async fn get_o_auth2_metadata( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Anonymously accessible. Retrieves the client information clients should use when initiating OAuth2 authorization + requests. +*/ + async fn get_public_client_config( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /** The following defines an RPC service that is also served over HTTP via grpc-gateway. + Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go + RPCs defined in this service must be anonymously accessible. +*/ + #[derive(Debug)] + pub struct AuthMetadataServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AuthMetadataServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for AuthMetadataServiceServer + where + T: AuthMetadataService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.AuthMetadataService/GetOAuth2Metadata" => { + #[allow(non_camel_case_types)] + struct GetOAuth2MetadataSvc(pub Arc); + impl< + T: AuthMetadataService, + > tonic::server::UnaryService + for GetOAuth2MetadataSvc { + type Response = super::OAuth2MetadataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_o_auth2_metadata( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetOAuth2MetadataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.AuthMetadataService/GetPublicClientConfig" => { + #[allow(non_camel_case_types)] + struct GetPublicClientConfigSvc(pub Arc); + impl< + T: AuthMetadataService, + > tonic::server::UnaryService + for GetPublicClientConfigSvc { + type Response = super::PublicClientAuthConfigResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_public_client_config( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetPublicClientConfigSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + 
.unwrap(), + ) + }) + } + } + } + } + impl Clone for AuthMetadataServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for AuthMetadataServiceServer { + const NAME: &'static str = "flyteidl.service.AuthMetadataService"; + } +} +/// Generated client implementations. +pub mod data_proxy_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** DataProxyService defines an RPC Service that allows access to user-data in a controlled manner. +*/ + #[derive(Debug, Clone)] + pub struct DataProxyServiceClient { + inner: tonic::client::Grpc, + } + impl DataProxyServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DataProxyServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DataProxyServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DataProxyServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** CreateUploadLocation creates a signed url to upload artifacts to for a given project/domain. 
+*/ + pub async fn create_upload_location( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/CreateUploadLocation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.DataProxyService", + "CreateUploadLocation", + ), + ); + self.inner.unary(req, path, codec).await + } + /** CreateDownloadLocation creates a signed url to download artifacts. +*/ + pub async fn create_download_location( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/CreateDownloadLocation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.DataProxyService", + "CreateDownloadLocation", + ), + ); + self.inner.unary(req, path, codec).await + } + /** CreateDownloadLocation creates a signed url to download artifacts. +*/ + pub async fn create_download_link( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/CreateDownloadLink", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.DataProxyService", + "CreateDownloadLink", + ), + ); + self.inner.unary(req, path, codec).await + } + /// + pub async fn get_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/GetData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.DataProxyService", "GetData")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod data_proxy_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with DataProxyServiceServer. + #[async_trait] + pub trait DataProxyService: Send + Sync + 'static { + /** CreateUploadLocation creates a signed url to upload artifacts to for a given project/domain. 
+*/ + async fn create_upload_location( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** CreateDownloadLocation creates a signed url to download artifacts. +*/ + async fn create_download_location( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** CreateDownloadLocation creates a signed url to download artifacts. +*/ + async fn create_download_link( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// + async fn get_data( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + /** DataProxyService defines an RPC Service that allows access to user-data in a controlled manner. +*/ + #[derive(Debug)] + pub struct DataProxyServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl DataProxyServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for DataProxyServiceServer + where + T: DataProxyService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.DataProxyService/CreateUploadLocation" => { + #[allow(non_camel_case_types)] + struct CreateUploadLocationSvc(pub Arc); + impl< + T: DataProxyService, + > tonic::server::UnaryService + for CreateUploadLocationSvc { + type Response = super::CreateUploadLocationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_upload_location( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateUploadLocationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.DataProxyService/CreateDownloadLocation" => { + #[allow(non_camel_case_types)] + struct CreateDownloadLocationSvc(pub Arc); + impl< + T: DataProxyService, + > tonic::server::UnaryService + for CreateDownloadLocationSvc { + type Response = super::CreateDownloadLocationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_download_location( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateDownloadLocationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.DataProxyService/CreateDownloadLink" => { + #[allow(non_camel_case_types)] + struct CreateDownloadLinkSvc(pub Arc); + impl< + T: DataProxyService, + > 
tonic::server::UnaryService + for CreateDownloadLinkSvc { + type Response = super::CreateDownloadLinkResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_download_link( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateDownloadLinkSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.DataProxyService/GetData" => { + #[allow(non_camel_case_types)] + struct GetDataSvc(pub Arc); + impl< + T: DataProxyService, + > tonic::server::UnaryService + for GetDataSvc { + type Response = super::GetDataResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_data(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetDataSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for DataProxyServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for DataProxyServiceServer { + const NAME: &'static str = "flyteidl.service.DataProxyService"; + } +} +/// Generated client implementations. 
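The generated clients above all follow the same tonic pattern: `connect` dials an endpoint, and each RPC method accepts anything convertible into its request message. As a minimal usage sketch for the DataProxyService client, not part of the generated diff, and assuming the generated modules are compiled into a crate exposed as `flyteidl::service` with a Flyte admin endpoint listening locally (neither of which is established by this patch):

// Sketch only: exercising the generated DataProxyService client.
// The `flyteidl::service` path and the endpoint URL are assumptions.
use flyteidl::service::data_proxy_service_client::DataProxyServiceClient;
use flyteidl::service::CreateUploadLocationRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `connect` is the generated constructor shown in this file.
    let mut client = DataProxyServiceClient::connect("http://localhost:8089").await?;

    // Only project/domain are set here; the remaining fields fall back to
    // their prost defaults.
    let request = CreateUploadLocationRequest {
        project: "flytesnacks".to_string(),
        domain: "development".to_string(),
        ..Default::default()
    };

    let response = client.create_upload_location(request).await?;
    println!("signed upload location: {:?}", response.into_inner());
    Ok(())
}

The same shape applies to every other client in this file; only the request and response message types change.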
+pub mod external_plugin_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct ExternalPluginServiceClient { + inner: tonic::client::Grpc, + } + impl ExternalPluginServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ExternalPluginServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ExternalPluginServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + ExternalPluginServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn create_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.ExternalPluginService/CreateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.ExternalPluginService", + "CreateTask", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.ExternalPluginService/GetTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.ExternalPluginService", "GetTask"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn delete_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.ExternalPluginService/DeleteTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.ExternalPluginService", + "DeleteTask", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod external_plugin_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with ExternalPluginServiceServer. 
+ #[async_trait] + pub trait ExternalPluginService: Send + Sync + 'static { + async fn create_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_task( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn delete_task( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct ExternalPluginServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl ExternalPluginServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> + for ExternalPluginServiceServer + where + T: ExternalPluginService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.ExternalPluginService/CreateTask" => { + #[allow(non_camel_case_types)] + struct CreateTaskSvc(pub Arc); + impl< + T: ExternalPluginService, + > tonic::server::UnaryService + for CreateTaskSvc { + type Response = super::TaskCreateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_task(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.ExternalPluginService/GetTask" => { + #[allow(non_camel_case_types)] + struct GetTaskSvc(pub Arc); + impl< + T: ExternalPluginService, + > tonic::server::UnaryService + for GetTaskSvc { + type Response = super::TaskGetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_task(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.ExternalPluginService/DeleteTask" => { + #[allow(non_camel_case_types)] + struct DeleteTaskSvc(pub Arc); + impl< + T: ExternalPluginService, + > tonic::server::UnaryService + for DeleteTaskSvc { + type Response = super::TaskDeleteResponse; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete_task(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for ExternalPluginServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for ExternalPluginServiceServer { + const NAME: &'static str = "flyteidl.service.ExternalPluginService"; + } +} +/// Generated client implementations. +pub mod identity_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** IdentityService defines an RPC Service that interacts with user/app identities. +*/ + #[derive(Debug, Clone)] + pub struct IdentityServiceClient { + inner: tonic::client::Grpc, + } + impl IdentityServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl IdentityServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> IdentityServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + IdentityServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. 
+ /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** Retrieves user information about the currently logged in user. +*/ + pub async fn user_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.IdentityService/UserInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.IdentityService", "UserInfo")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod identity_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with IdentityServiceServer. + #[async_trait] + pub trait IdentityService: Send + Sync + 'static { + /** Retrieves user information about the currently logged in user. +*/ + async fn user_info( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /** IdentityService defines an RPC Service that interacts with user/app identities. +*/ + #[derive(Debug)] + pub struct IdentityServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl IdentityServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for IdentityServiceServer + where + T: IdentityService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.IdentityService/UserInfo" => { + #[allow(non_camel_case_types)] + struct UserInfoSvc(pub Arc); + impl< + T: IdentityService, + > tonic::server::UnaryService + for UserInfoSvc { + type Response = super::UserInfoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::user_info(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UserInfoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for IdentityServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for IdentityServiceServer { + const NAME: &'static str = "flyteidl.service.IdentityService"; + } +} +/// Generated client implementations. 
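The server half is the mirror image: implement the generated trait, wrap it in the corresponding `*Server`, and mount it on a tonic transport. A sketch for the IdentityService generated above, again assuming the `flyteidl::service` module path, with the handler returning a default-valued response rather than real identity data:

// Sketch only: serving the generated IdentityService.
// Module path, bind address, and the 8 MiB limit are illustrative assumptions.
use flyteidl::service::identity_service_server::{IdentityService, IdentityServiceServer};
use flyteidl::service::{UserInfoRequest, UserInfoResponse};
use tonic::{Request, Response, Status};

#[derive(Default)]
struct StaticIdentity;

#[tonic::async_trait]
impl IdentityService for StaticIdentity {
    async fn user_info(
        &self,
        _request: Request<UserInfoRequest>,
    ) -> Result<Response<UserInfoResponse>, Status> {
        // A real implementation would resolve the caller from request metadata;
        // here we simply return an empty, default-valued response.
        Ok(Response::new(UserInfoResponse::default()))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let svc = IdentityServiceServer::new(StaticIdentity::default())
        // Builder method shown above; the figure is arbitrary, not a Flyte default.
        .max_decoding_message_size(8 * 1024 * 1024);

    tonic::transport::Server::builder()
        .add_service(svc)
        .serve("0.0.0.0:8089".parse()?)
        .await?;
    Ok(())
}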
+pub mod signal_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /** SignalService defines an RPC Service that may create, update, and retrieve signal(s). +*/ + #[derive(Debug, Clone)] + pub struct SignalServiceClient { + inner: tonic::client::Grpc, + } + impl SignalServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SignalServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SignalServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + SignalServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /** Fetches or creates a :ref:`ref_flyteidl.admin.Signal`. +*/ + pub async fn get_or_create_signal( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::SignalGetOrCreateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.SignalService/GetOrCreateSignal", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.SignalService", + "GetOrCreateSignal", + ), + ); + self.inner.unary(req, path, codec).await + } + /** Fetch a list of :ref:`ref_flyteidl.admin.Signal` definitions. 
+*/ + pub async fn list_signals( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.SignalService/ListSignals", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.SignalService", "ListSignals"), + ); + self.inner.unary(req, path, codec).await + } + /** Sets the value on a :ref:`ref_flyteidl.admin.Signal` definition +*/ + pub async fn set_signal( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.SignalService/SetSignal", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.SignalService", "SetSignal")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod signal_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with SignalServiceServer. + #[async_trait] + pub trait SignalService: Send + Sync + 'static { + /** Fetches or creates a :ref:`ref_flyteidl.admin.Signal`. +*/ + async fn get_or_create_signal( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Fetch a list of :ref:`ref_flyteidl.admin.Signal` definitions. +*/ + async fn list_signals( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /** Sets the value on a :ref:`ref_flyteidl.admin.Signal` definition +*/ + async fn set_signal( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /** SignalService defines an RPC Service that may create, update, and retrieve signal(s). +*/ + #[derive(Debug)] + pub struct SignalServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl SignalServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for SignalServiceServer + where + T: SignalService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/flyteidl.service.SignalService/GetOrCreateSignal" => { + #[allow(non_camel_case_types)] + struct GetOrCreateSignalSvc(pub Arc); + impl< + T: SignalService, + > tonic::server::UnaryService< + super::super::admin::SignalGetOrCreateRequest, + > for GetOrCreateSignalSvc { + type Response = super::super::admin::Signal; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::SignalGetOrCreateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_or_create_signal(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetOrCreateSignalSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl.service.SignalService/ListSignals" => { + #[allow(non_camel_case_types)] + struct ListSignalsSvc(pub Arc); + impl< + T: SignalService, + > tonic::server::UnaryService + for ListSignalsSvc { + type Response = super::super::admin::SignalList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::admin::SignalListRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_signals(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let inner = inner.0;
+                        let method = ListSignalsSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = tonic::server::Grpc::new(codec)
+                            .apply_compression_config(
+                                accept_compression_encodings,
+                                send_compression_encodings,
+                            )
+                            .apply_max_message_size_config(
+                                max_decoding_message_size,
+                                max_encoding_message_size,
+                            );
+                        let res = grpc.unary(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
+                "/flyteidl.service.SignalService/SetSignal" => {
+                    #[allow(non_camel_case_types)]
+                    struct SetSignalSvc<T: SignalService>(pub Arc<T>);
+                    impl<
+                        T: SignalService,
+                    > tonic::server::UnaryService<super::super::admin::SignalSetRequest>
+                    for SetSignalSvc<T> {
+                        type Response = super::super::admin::SignalSetResponse;
+                        type Future = BoxFuture<
+                            tonic::Response<Self::Response>,
+                            tonic::Status,
+                        >;
+                        fn call(
+                            &mut self,
+                            request: tonic::Request<
+                                super::super::admin::SignalSetRequest,
+                            >,
+                        ) -> Self::Future {
+                            let inner = Arc::clone(&self.0);
+                            let fut = async move {
+                                <T as SignalService>::set_signal(&inner, request).await
+                            };
+                            Box::pin(fut)
+                        }
+                    }
+                    let accept_compression_encodings = self.accept_compression_encodings;
+                    let send_compression_encodings = self.send_compression_encodings;
+                    let max_decoding_message_size = self.max_decoding_message_size;
+                    let max_encoding_message_size = self.max_encoding_message_size;
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let inner = inner.0;
+                        let method = SetSignalSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = tonic::server::Grpc::new(codec)
+                            .apply_compression_config(
+                                accept_compression_encodings,
+                                send_compression_encodings,
+                            )
+                            .apply_max_message_size_config(
+                                max_decoding_message_size,
+                                max_encoding_message_size,
+                            );
+                        let res = grpc.unary(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
+                _ => {
+                    Box::pin(async move {
+                        Ok(
+                            http::Response::builder()
+                                .status(200)
+                                .header("grpc-status", "12")
+                                .header("content-type", "application/grpc")
+                                .body(empty_body())
+                                .unwrap(),
+                        )
+                    })
+                }
+            }
+        }
+    }
+    impl<T: SignalService> Clone for SignalServiceServer<T> {
+        fn clone(&self) -> Self {
+            let inner = self.inner.clone();
+            Self {
+                inner,
+                accept_compression_encodings: self.accept_compression_encodings,
+                send_compression_encodings: self.send_compression_encodings,
+                max_decoding_message_size: self.max_decoding_message_size,
+                max_encoding_message_size: self.max_encoding_message_size,
+            }
+        }
+    }
+    impl<T: SignalService> Clone for _Inner<T> {
+        fn clone(&self) -> Self {
+            Self(Arc::clone(&self.0))
+        }
+    }
+    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            write!(f, "{:?}", self.0)
+        }
+    }
+    impl<T: SignalService> tonic::server::NamedService for SignalServiceServer<T> {
+        const NAME: &'static str = "flyteidl.service.SignalService";
+    }
+}

From ab95f7e7241afd9c020b1fd1dedd69f9ff613baa Mon Sep 17 00:00:00 2001
From: Paul Dittamo <37558497+pvditt@users.noreply.github.com>
Date: Thu, 11 Apr 2024 11:27:37 -0700
Subject: [PATCH 11/15] enable parallelism to be set to nil for array node (#5214)

* enable parallelism to be set to nil for array node

Signed-off-by: Paul Dittamo

* unit test

Signed-off-by: Paul Dittamo

---------

Signed-off-by: Paul Dittamo
---
 flyteidl/clients/go/assets/admin.swagger.json |   2 +-
 .../gen/pb-es/flyteidl/core/workflow_pb.ts    |  24 +-
 .../gen/pb-go/flyteidl/core/workflow.pb.go    | 363 ++++++++++--------
 .../flyteidl/service/admin.swagger.json       |   2 +-
 flyteidl/gen/pb-js/flyteidl.d.ts              |   7 +-
 flyteidl/gen/pb-js/flyteidl.js                |  29 +-
.../pb_python/flyteidl/core/workflow_pb2.py | 83 ++-- .../pb_python/flyteidl/core/workflow_pb2.pyi | 1 + flyteidl/gen/pb_rust/flyteidl.core.rs | 18 +- flyteidl/protos/flyteidl/core/workflow.proto | 13 +- .../pkg/apis/flyteworkflow/v1alpha1/array.go | 4 +- .../apis/flyteworkflow/v1alpha1/array_test.go | 6 +- .../pkg/apis/flyteworkflow/v1alpha1/iface.go | 2 +- .../v1alpha1/mocks/ExecutableArrayNode.go | 12 +- .../pkg/compiler/transformers/k8s/node.go | 8 +- .../compiler/transformers/k8s/node_test.go | 33 +- .../pkg/controller/nodes/array/handler.go | 4 +- .../controller/nodes/array/handler_test.go | 29 +- 18 files changed, 373 insertions(+), 267 deletions(-) diff --git a/flyteidl/clients/go/assets/admin.swagger.json b/flyteidl/clients/go/assets/admin.swagger.json index 9273a46776..bcaf46928b 100644 --- a/flyteidl/clients/go/assets/admin.swagger.json +++ b/flyteidl/clients/go/assets/admin.swagger.json @@ -6483,7 +6483,7 @@ "description": "node is the sub-node that will be executed for each element in the array." }, "parallelism": { - "type": "string", + "type": "integer", "format": "int64", "description": "parallelism defines the minimum number of instances to bring up concurrently at any given\npoint. Note that this is an optimistic restriction and that, due to network partitioning or\nother failures, the actual number of currently running instances might be more. This has to\nbe a positive number if assigned. Default value is size." }, diff --git a/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts b/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts index 9efdcf91dd..0db88c5d3d 100644 --- a/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/core/workflow_pb.ts @@ -4,7 +4,7 @@ // @ts-nocheck import type { BinaryReadOptions, FieldList, JsonReadOptions, JsonValue, PartialMessage, PlainMessage } from "@bufbuild/protobuf"; -import { Duration, Message, proto3, protoInt64 } from "@bufbuild/protobuf"; +import { Duration, Message, proto3 } from "@bufbuild/protobuf"; import { BooleanExpression } from "./condition_pb.js"; import { Error, LiteralType } from "./types_pb.js"; import { Identifier } from "./identifier_pb.js"; @@ -507,14 +507,20 @@ export class ArrayNode extends Message { node?: Node; /** - * parallelism defines the minimum number of instances to bring up concurrently at any given - * point. Note that this is an optimistic restriction and that, due to network partitioning or - * other failures, the actual number of currently running instances might be more. This has to - * be a positive number if assigned. Default value is size. - * - * @generated from field: int64 parallelism = 2; + * @generated from oneof flyteidl.core.ArrayNode.parallelism_option */ - parallelism = protoInt64.zero; + parallelismOption: { + /** + * parallelism defines the minimum number of instances to bring up concurrently at any given + * point. Note that this is an optimistic restriction and that, due to network partitioning or + * other failures, the actual number of currently running instances might be more. This has to + * be a positive number if assigned. Default value is size. 
+ * + * @generated from field: uint32 parallelism = 2; + */ + value: number; + case: "parallelism"; + } | { case: undefined; value?: undefined } = { case: undefined }; /** * @generated from oneof flyteidl.core.ArrayNode.success_criteria @@ -550,7 +556,7 @@ export class ArrayNode extends Message { static readonly typeName = "flyteidl.core.ArrayNode"; static readonly fields: FieldList = proto3.util.newFieldList(() => [ { no: 1, name: "node", kind: "message", T: Node }, - { no: 2, name: "parallelism", kind: "scalar", T: 3 /* ScalarType.INT64 */ }, + { no: 2, name: "parallelism", kind: "scalar", T: 13 /* ScalarType.UINT32 */, oneof: "parallelism_option" }, { no: 3, name: "min_successes", kind: "scalar", T: 13 /* ScalarType.UINT32 */, oneof: "success_criteria" }, { no: 4, name: "min_success_ratio", kind: "scalar", T: 2 /* ScalarType.FLOAT */, oneof: "success_criteria" }, ]); diff --git a/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go b/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go index 983bbdcf43..077f34da9b 100644 --- a/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/core/workflow.pb.go @@ -10,6 +10,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" sync "sync" ) @@ -723,11 +724,10 @@ type ArrayNode struct { // node is the sub-node that will be executed for each element in the array. Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // parallelism defines the minimum number of instances to bring up concurrently at any given - // point. Note that this is an optimistic restriction and that, due to network partitioning or - // other failures, the actual number of currently running instances might be more. This has to - // be a positive number if assigned. Default value is size. - Parallelism int64 `protobuf:"varint,2,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + // Types that are assignable to ParallelismOption: + // + // *ArrayNode_Parallelism + ParallelismOption isArrayNode_ParallelismOption `protobuf_oneof:"parallelism_option"` // Types that are assignable to SuccessCriteria: // // *ArrayNode_MinSuccesses @@ -774,8 +774,15 @@ func (x *ArrayNode) GetNode() *Node { return nil } -func (x *ArrayNode) GetParallelism() int64 { - if x != nil { +func (m *ArrayNode) GetParallelismOption() isArrayNode_ParallelismOption { + if m != nil { + return m.ParallelismOption + } + return nil +} + +func (x *ArrayNode) GetParallelism() uint32 { + if x, ok := x.GetParallelismOption().(*ArrayNode_Parallelism); ok { return x.Parallelism } return 0 @@ -802,6 +809,20 @@ func (x *ArrayNode) GetMinSuccessRatio() float32 { return 0 } +type isArrayNode_ParallelismOption interface { + isArrayNode_ParallelismOption() +} + +type ArrayNode_Parallelism struct { + // parallelism defines the minimum number of instances to bring up concurrently at any given + // point. Note that this is an optimistic restriction and that, due to network partitioning or + // other failures, the actual number of currently running instances might be more. This has to + // be a positive number if assigned. Default value is size. 
+ Parallelism uint32 `protobuf:"varint,2,opt,name=parallelism,proto3,oneof"` +} + +func (*ArrayNode_Parallelism) isArrayNode_ParallelismOption() {} + type isArrayNode_SuccessCriteria interface { isArrayNode_SuccessCriteria() } @@ -1644,7 +1665,9 @@ var file_flyteidl_core_workflow_proto_rawDesc = []byte{ 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7b, 0x0a, 0x07, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7b, 0x0a, 0x07, 0x49, 0x66, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x3e, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x65, @@ -1719,172 +1742,173 @@ var file_flyteidl_core_workflow_proto_rawDesc = []byte{ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x05, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x42, 0x0b, - 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbf, 0x01, 0x0a, 0x09, + 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd7, 0x01, 0x0a, 0x09, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, - 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, - 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, - 0x6c, 0x69, 0x73, 0x6d, 0x12, 0x25, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0c, 0x6d, - 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x6d, - 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x48, 0x00, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x12, 0x0a, 0x10, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x8c, 0x03, - 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x36, 
0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x74, - 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, - 0x26, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, - 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x09, 0x63, 0x61, 0x63, 0x68, 0x65, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x09, 0x63, 0x61, - 0x63, 0x68, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, - 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, - 0x0a, 0x12, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x03, 0x52, 0x11, 0x63, 0x61, - 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x62, 0x6c, 0x65, 0x42, - 0x15, 0x0a, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x1a, 0x0a, 0x18, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, 0x05, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x76, 0x61, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x9f, 0x04, - 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x2e, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, - 0x2a, 0x0a, 0x11, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x75, 0x70, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x0e, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x6f, 0x75, 
0x74, 0x70, 0x75, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, - 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, - 0x12, 0x42, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x3c, 0x0a, 0x0b, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x6e, - 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, - 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x4e, 0x6f, - 0x64, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, - 0x52, 0x08, 0x67, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x61, 0x72, - 0x72, 0x61, 0x79, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, - 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x09, 0x61, 0x72, 0x72, 0x61, - 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, - 0xfc, 0x02, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x12, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, - 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x10, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x09, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, - 0x67, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x0f, 0x4f, - 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, - 0x0a, 0x10, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x49, 0x4d, 0x4d, 0x45, 0x44, 0x49, 0x41, 0x54, 0x45, - 0x4c, 0x59, 0x10, 0x00, 0x12, 0x28, 0x0a, 0x24, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x41, 0x46, 0x54, - 0x45, 0x52, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, - 0x44, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x22, 0x40, - 0x0a, 0x18, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, - 0x22, 0xa2, 0x03, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, + 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, + 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d, 0x12, 0x25, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, + 0x0c, 0x6d, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2c, 0x0a, + 0x11, 0x6d, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x48, 0x01, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x14, 0x0a, 0x12, 0x70, + 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x12, 0x0a, 0x10, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x72, 0x69, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x8c, 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, + 0x36, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x07, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, + 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, + 0x1e, 0x0a, 0x09, 0x63, 0x61, 
0x63, 0x68, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x01, 0x52, 0x09, 0x63, 0x61, 0x63, 0x68, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x25, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x12, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x03, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x11, + 0x0a, 0x0f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x15, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x1a, 0x0a, 0x18, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x76, 0x61, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x75, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x0e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, + 0x12, 0x36, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x08, + 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x42, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 
0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x0c, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x3c, 0x0a, 0x0b, + 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, - 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, - 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x29, 0x0a, 0x05, 0x6e, 0x6f, - 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x07, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x0b, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, - 0x54, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, + 0x65, 0x2e, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x0a, + 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x61, + 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x08, 0x67, 0x61, 0x74, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, + 0x48, 0x00, 0x52, 0x09, 0x61, 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x08, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xfc, 0x02, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x12, + 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 
0x72, 0x76, 0x69, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x10, 0x71, 0x75, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0a, 0x6f, + 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x09, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x3d, 0x0a, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, + 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x0f, 0x4f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x49, + 0x4d, 0x4d, 0x45, 0x44, 0x49, 0x41, 0x54, 0x45, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x28, 0x0a, 0x24, + 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, + 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x22, 0x40, 0x0a, 0x18, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, + 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x22, 0xa2, 0x03, 0x0a, 0x10, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x29, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x73, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x11, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, - 0x64, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x72, - 0x65, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x22, 0xba, 0x01, - 0x0a, 0x12, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, - 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x3c, 0x0a, 0x0c, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0b, 0x66, - 0x69, 0x78, 0x65, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x42, 0xb3, 0x01, 0x0a, 0x11, 0x63, - 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x42, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, - 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, - 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, - 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, - 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 
0x72, 0x66, 0x61, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x12, 0x29, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x30, 0x0a, + 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, + 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, + 0x36, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x10, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xc5, 0x01, + 0x0a, 0x11, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x12, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x50, 0x6c, 0x61, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x29, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x66, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 
0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x66, 0x61, 0x63, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, + 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x52, 0x0b, 0x66, 0x69, 0x78, 0x65, 0x64, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x42, 0xb3, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2233,6 +2257,7 @@ func file_flyteidl_core_workflow_proto_init() { (*GateNode_Sleep)(nil), } file_flyteidl_core_workflow_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ArrayNode_Parallelism)(nil), (*ArrayNode_MinSuccesses)(nil), (*ArrayNode_MinSuccessRatio)(nil), } diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json index 9273a46776..bcaf46928b 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json @@ -6483,7 +6483,7 @@ "description": "node is the sub-node that will be executed for each element in the array." }, "parallelism": { - "type": "string", + "type": "integer", "format": "int64", "description": "parallelism defines the minimum number of instances to bring up concurrently at any given\npoint. Note that this is an optimistic restriction and that, due to network partitioning or\nother failures, the actual number of currently running instances might be more. This has to\nbe a positive number if assigned. Default value is size." }, diff --git a/flyteidl/gen/pb-js/flyteidl.d.ts b/flyteidl/gen/pb-js/flyteidl.d.ts index 0126d892f2..54bd68f461 100644 --- a/flyteidl/gen/pb-js/flyteidl.d.ts +++ b/flyteidl/gen/pb-js/flyteidl.d.ts @@ -4506,7 +4506,7 @@ export namespace flyteidl { node?: (flyteidl.core.INode|null); /** ArrayNode parallelism */ - parallelism?: (Long|null); + parallelism?: (number|null); /** ArrayNode minSuccesses */ minSuccesses?: (number|null); @@ -4528,7 +4528,7 @@ export namespace flyteidl { public node?: (flyteidl.core.INode|null); /** ArrayNode parallelism. */ - public parallelism: Long; + public parallelism: number; /** ArrayNode minSuccesses. 
*/ public minSuccesses: number; @@ -4536,6 +4536,9 @@ export namespace flyteidl { /** ArrayNode minSuccessRatio. */ public minSuccessRatio: number; + /** ArrayNode parallelismOption. */ + public parallelismOption?: "parallelism"; + /** ArrayNode successCriteria. */ public successCriteria?: ("minSuccesses"|"minSuccessRatio"); diff --git a/flyteidl/gen/pb-js/flyteidl.js b/flyteidl/gen/pb-js/flyteidl.js index e288f8e111..8a47c9bf30 100644 --- a/flyteidl/gen/pb-js/flyteidl.js +++ b/flyteidl/gen/pb-js/flyteidl.js @@ -10808,7 +10808,7 @@ * @memberof flyteidl.core * @interface IArrayNode * @property {flyteidl.core.INode|null} [node] ArrayNode node - * @property {Long|null} [parallelism] ArrayNode parallelism + * @property {number|null} [parallelism] ArrayNode parallelism * @property {number|null} [minSuccesses] ArrayNode minSuccesses * @property {number|null} [minSuccessRatio] ArrayNode minSuccessRatio */ @@ -10838,11 +10838,11 @@ /** * ArrayNode parallelism. - * @member {Long} parallelism + * @member {number} parallelism * @memberof flyteidl.core.ArrayNode * @instance */ - ArrayNode.prototype.parallelism = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ArrayNode.prototype.parallelism = 0; /** * ArrayNode minSuccesses. @@ -10863,6 +10863,17 @@ // OneOf field names bound to virtual getters and setters var $oneOfFields; + /** + * ArrayNode parallelismOption. + * @member {"parallelism"|undefined} parallelismOption + * @memberof flyteidl.core.ArrayNode + * @instance + */ + Object.defineProperty(ArrayNode.prototype, "parallelismOption", { + get: $util.oneOfGetter($oneOfFields = ["parallelism"]), + set: $util.oneOfSetter($oneOfFields) + }); + /** * ArrayNode successCriteria. * @member {"minSuccesses"|"minSuccessRatio"|undefined} successCriteria @@ -10901,7 +10912,7 @@ if (message.node != null && message.hasOwnProperty("node")) $root.flyteidl.core.Node.encode(message.node, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.parallelism != null && message.hasOwnProperty("parallelism")) - writer.uint32(/* id 2, wireType 0 =*/16).int64(message.parallelism); + writer.uint32(/* id 2, wireType 0 =*/16).uint32(message.parallelism); if (message.minSuccesses != null && message.hasOwnProperty("minSuccesses")) writer.uint32(/* id 3, wireType 0 =*/24).uint32(message.minSuccesses); if (message.minSuccessRatio != null && message.hasOwnProperty("minSuccessRatio")) @@ -10931,7 +10942,7 @@ message.node = $root.flyteidl.core.Node.decode(reader, reader.uint32()); break; case 2: - message.parallelism = reader.int64(); + message.parallelism = reader.uint32(); break; case 3: message.minSuccesses = reader.uint32(); @@ -10964,9 +10975,11 @@ if (error) return "node." 
+ error; } - if (message.parallelism != null && message.hasOwnProperty("parallelism")) - if (!$util.isInteger(message.parallelism) && !(message.parallelism && $util.isInteger(message.parallelism.low) && $util.isInteger(message.parallelism.high))) - return "parallelism: integer|Long expected"; + if (message.parallelism != null && message.hasOwnProperty("parallelism")) { + properties.parallelismOption = 1; + if (!$util.isInteger(message.parallelism)) + return "parallelism: integer expected"; + } if (message.minSuccesses != null && message.hasOwnProperty("minSuccesses")) { properties.successCriteria = 1; if (!$util.isInteger(message.minSuccesses)) diff --git a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py index 452c38c9c9..2113db78fe 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.py @@ -20,9 +20,10 @@ from flyteidl.core import types_pb2 as flyteidl_dot_core_dot_types__pb2 from flyteidl.core import security_pb2 as flyteidl_dot_core_dot_security__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/core/workflow.proto\x12\rflyteidl.core\x1a\x1d\x66lyteidl/core/condition.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/interface.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x19\x66lyteidl/core/tasks.proto\x1a\x19\x66lyteidl/core/types.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\"{\n\x07IfBlock\x12>\n\tcondition\x18\x01 \x01(\x0b\x32 .flyteidl.core.BooleanExpressionR\tcondition\x12\x30\n\tthen_node\x18\x02 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x08thenNode\"\xd4\x01\n\x0bIfElseBlock\x12*\n\x04\x63\x61se\x18\x01 \x01(\x0b\x32\x16.flyteidl.core.IfBlockR\x04\x63\x61se\x12,\n\x05other\x18\x02 \x03(\x0b\x32\x16.flyteidl.core.IfBlockR\x05other\x12\x32\n\telse_node\x18\x03 \x01(\x0b\x32\x13.flyteidl.core.NodeH\x00R\x08\x65lseNode\x12,\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x14.flyteidl.core.ErrorH\x00R\x05\x65rrorB\t\n\x07\x64\x65\x66\x61ult\"A\n\nBranchNode\x12\x33\n\x07if_else\x18\x01 \x01(\x0b\x32\x1a.flyteidl.core.IfElseBlockR\x06ifElse\"\x97\x01\n\x08TaskNode\x12>\n\x0creference_id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0breferenceId\x12>\n\toverrides\x18\x02 \x01(\x0b\x32 .flyteidl.core.TaskNodeOverridesR\toverridesB\x0b\n\treference\"\xa6\x01\n\x0cWorkflowNode\x12\x42\n\x0elaunchplan_ref\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\rlaunchplanRef\x12\x45\n\x10sub_workflow_ref\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0esubWorkflowRefB\x0b\n\treference\"/\n\x10\x41pproveCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\"\x90\x01\n\x0fSignalCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\x12.\n\x04type\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x04type\x12\x30\n\x14output_variable_name\x18\x03 \x01(\tR\x12outputVariableName\"G\n\x0eSleepCondition\x12\x35\n\x08\x64uration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\"\xc5\x01\n\x08GateNode\x12;\n\x07\x61pprove\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.ApproveConditionH\x00R\x07\x61pprove\x12\x38\n\x06signal\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.SignalConditionH\x00R\x06signal\x12\x35\n\x05sleep\x18\x03 
\x01(\x0b\x32\x1d.flyteidl.core.SleepConditionH\x00R\x05sleepB\x0b\n\tcondition\"\xbf\x01\n\tArrayNode\x12\'\n\x04node\x18\x01 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x04node\x12 \n\x0bparallelism\x18\x02 \x01(\x03R\x0bparallelism\x12%\n\rmin_successes\x18\x03 \x01(\rH\x00R\x0cminSuccesses\x12,\n\x11min_success_ratio\x18\x04 \x01(\x02H\x00R\x0fminSuccessRatioB\x12\n\x10success_criteria\"\x8c\x03\n\x0cNodeMetadata\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x33\n\x07timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x07timeout\x12\x36\n\x07retries\x18\x05 \x01(\x0b\x32\x1c.flyteidl.core.RetryStrategyR\x07retries\x12&\n\rinterruptible\x18\x06 \x01(\x08H\x00R\rinterruptible\x12\x1e\n\tcacheable\x18\x07 \x01(\x08H\x01R\tcacheable\x12%\n\rcache_version\x18\x08 \x01(\tH\x02R\x0c\x63\x61\x63heVersion\x12/\n\x12\x63\x61\x63he_serializable\x18\t \x01(\x08H\x03R\x11\x63\x61\x63heSerializableB\x15\n\x13interruptible_valueB\x11\n\x0f\x63\x61\x63heable_valueB\x15\n\x13\x63\x61\x63he_version_valueB\x1a\n\x18\x63\x61\x63he_serializable_value\"/\n\x05\x41lias\x12\x10\n\x03var\x18\x01 \x01(\tR\x03var\x12\x14\n\x05\x61lias\x18\x02 \x01(\tR\x05\x61lias\"\x9f\x04\n\x04Node\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x37\n\x08metadata\x18\x02 \x01(\x0b\x32\x1b.flyteidl.core.NodeMetadataR\x08metadata\x12.\n\x06inputs\x18\x03 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x06inputs\x12*\n\x11upstream_node_ids\x18\x04 \x03(\tR\x0fupstreamNodeIds\x12;\n\x0eoutput_aliases\x18\x05 \x03(\x0b\x32\x14.flyteidl.core.AliasR\routputAliases\x12\x36\n\ttask_node\x18\x06 \x01(\x0b\x32\x17.flyteidl.core.TaskNodeH\x00R\x08taskNode\x12\x42\n\rworkflow_node\x18\x07 \x01(\x0b\x32\x1b.flyteidl.core.WorkflowNodeH\x00R\x0cworkflowNode\x12<\n\x0b\x62ranch_node\x18\x08 \x01(\x0b\x32\x19.flyteidl.core.BranchNodeH\x00R\nbranchNode\x12\x36\n\tgate_node\x18\t \x01(\x0b\x32\x17.flyteidl.core.GateNodeH\x00R\x08gateNode\x12\x39\n\narray_node\x18\n \x01(\x0b\x32\x18.flyteidl.core.ArrayNodeH\x00R\tarrayNodeB\x08\n\x06target\"\xfc\x02\n\x10WorkflowMetadata\x12M\n\x12quality_of_service\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.QualityOfServiceR\x10qualityOfService\x12N\n\non_failure\x18\x02 \x01(\x0e\x32/.flyteidl.core.WorkflowMetadata.OnFailurePolicyR\tonFailure\x12=\n\x04tags\x18\x03 \x03(\x0b\x32).flyteidl.core.WorkflowMetadata.TagsEntryR\x04tags\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"Q\n\x0fOnFailurePolicy\x12\x14\n\x10\x46\x41IL_IMMEDIATELY\x10\x00\x12(\n$FAIL_AFTER_EXECUTABLE_NODES_COMPLETE\x10\x01\"@\n\x18WorkflowMetadataDefaults\x12$\n\rinterruptible\x18\x01 \x01(\x08R\rinterruptible\"\xa2\x03\n\x10WorkflowTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\x08metadata\x18\x02 \x01(\x0b\x32\x1f.flyteidl.core.WorkflowMetadataR\x08metadata\x12;\n\tinterface\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12)\n\x05nodes\x18\x04 \x03(\x0b\x32\x13.flyteidl.core.NodeR\x05nodes\x12\x30\n\x07outputs\x18\x05 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x07outputs\x12\x36\n\x0c\x66\x61ilure_node\x18\x06 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x0b\x66\x61ilureNode\x12T\n\x11metadata_defaults\x18\x07 \x01(\x0b\x32\'.flyteidl.core.WorkflowMetadataDefaultsR\x10metadataDefaults\"\xc5\x01\n\x11TaskNodeOverrides\x12\x36\n\tresources\x18\x01 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x12\x65xtended_resources\x18\x02 \x01(\x0b\x32 
.flyteidl.core.ExtendedResourcesR\x11\x65xtendedResources\x12\'\n\x0f\x63ontainer_image\x18\x03 \x01(\tR\x0e\x63ontainerImage\"\xba\x01\n\x12LaunchPlanTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\tinterface\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12<\n\x0c\x66ixed_inputs\x18\x03 \x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ixedInputsB\xb3\x01\n\x11\x63om.flyteidl.coreB\rWorkflowProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lyteidl/core/workflow.proto\x12\rflyteidl.core\x1a\x1d\x66lyteidl/core/condition.proto\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1e\x66lyteidl/core/identifier.proto\x1a\x1d\x66lyteidl/core/interface.proto\x1a\x1c\x66lyteidl/core/literals.proto\x1a\x19\x66lyteidl/core/tasks.proto\x1a\x19\x66lyteidl/core/types.proto\x1a\x1c\x66lyteidl/core/security.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\"{\n\x07IfBlock\x12>\n\tcondition\x18\x01 \x01(\x0b\x32 .flyteidl.core.BooleanExpressionR\tcondition\x12\x30\n\tthen_node\x18\x02 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x08thenNode\"\xd4\x01\n\x0bIfElseBlock\x12*\n\x04\x63\x61se\x18\x01 \x01(\x0b\x32\x16.flyteidl.core.IfBlockR\x04\x63\x61se\x12,\n\x05other\x18\x02 \x03(\x0b\x32\x16.flyteidl.core.IfBlockR\x05other\x12\x32\n\telse_node\x18\x03 \x01(\x0b\x32\x13.flyteidl.core.NodeH\x00R\x08\x65lseNode\x12,\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x14.flyteidl.core.ErrorH\x00R\x05\x65rrorB\t\n\x07\x64\x65\x66\x61ult\"A\n\nBranchNode\x12\x33\n\x07if_else\x18\x01 \x01(\x0b\x32\x1a.flyteidl.core.IfElseBlockR\x06ifElse\"\x97\x01\n\x08TaskNode\x12>\n\x0creference_id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0breferenceId\x12>\n\toverrides\x18\x02 \x01(\x0b\x32 .flyteidl.core.TaskNodeOverridesR\toverridesB\x0b\n\treference\"\xa6\x01\n\x0cWorkflowNode\x12\x42\n\x0elaunchplan_ref\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\rlaunchplanRef\x12\x45\n\x10sub_workflow_ref\x18\x02 \x01(\x0b\x32\x19.flyteidl.core.IdentifierH\x00R\x0esubWorkflowRefB\x0b\n\treference\"/\n\x10\x41pproveCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\"\x90\x01\n\x0fSignalCondition\x12\x1b\n\tsignal_id\x18\x01 \x01(\tR\x08signalId\x12.\n\x04type\x18\x02 \x01(\x0b\x32\x1a.flyteidl.core.LiteralTypeR\x04type\x12\x30\n\x14output_variable_name\x18\x03 \x01(\tR\x12outputVariableName\"G\n\x0eSleepCondition\x12\x35\n\x08\x64uration\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x08\x64uration\"\xc5\x01\n\x08GateNode\x12;\n\x07\x61pprove\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.ApproveConditionH\x00R\x07\x61pprove\x12\x38\n\x06signal\x18\x02 \x01(\x0b\x32\x1e.flyteidl.core.SignalConditionH\x00R\x06signal\x12\x35\n\x05sleep\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.SleepConditionH\x00R\x05sleepB\x0b\n\tcondition\"\xd7\x01\n\tArrayNode\x12\'\n\x04node\x18\x01 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x04node\x12\"\n\x0bparallelism\x18\x02 \x01(\rH\x00R\x0bparallelism\x12%\n\rmin_successes\x18\x03 \x01(\rH\x01R\x0cminSuccesses\x12,\n\x11min_success_ratio\x18\x04 \x01(\x02H\x01R\x0fminSuccessRatioB\x14\n\x12parallelism_optionB\x12\n\x10success_criteria\"\x8c\x03\n\x0cNodeMetadata\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x33\n\x07timeout\x18\x04 
\x01(\x0b\x32\x19.google.protobuf.DurationR\x07timeout\x12\x36\n\x07retries\x18\x05 \x01(\x0b\x32\x1c.flyteidl.core.RetryStrategyR\x07retries\x12&\n\rinterruptible\x18\x06 \x01(\x08H\x00R\rinterruptible\x12\x1e\n\tcacheable\x18\x07 \x01(\x08H\x01R\tcacheable\x12%\n\rcache_version\x18\x08 \x01(\tH\x02R\x0c\x63\x61\x63heVersion\x12/\n\x12\x63\x61\x63he_serializable\x18\t \x01(\x08H\x03R\x11\x63\x61\x63heSerializableB\x15\n\x13interruptible_valueB\x11\n\x0f\x63\x61\x63heable_valueB\x15\n\x13\x63\x61\x63he_version_valueB\x1a\n\x18\x63\x61\x63he_serializable_value\"/\n\x05\x41lias\x12\x10\n\x03var\x18\x01 \x01(\tR\x03var\x12\x14\n\x05\x61lias\x18\x02 \x01(\tR\x05\x61lias\"\x9f\x04\n\x04Node\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x37\n\x08metadata\x18\x02 \x01(\x0b\x32\x1b.flyteidl.core.NodeMetadataR\x08metadata\x12.\n\x06inputs\x18\x03 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x06inputs\x12*\n\x11upstream_node_ids\x18\x04 \x03(\tR\x0fupstreamNodeIds\x12;\n\x0eoutput_aliases\x18\x05 \x03(\x0b\x32\x14.flyteidl.core.AliasR\routputAliases\x12\x36\n\ttask_node\x18\x06 \x01(\x0b\x32\x17.flyteidl.core.TaskNodeH\x00R\x08taskNode\x12\x42\n\rworkflow_node\x18\x07 \x01(\x0b\x32\x1b.flyteidl.core.WorkflowNodeH\x00R\x0cworkflowNode\x12<\n\x0b\x62ranch_node\x18\x08 \x01(\x0b\x32\x19.flyteidl.core.BranchNodeH\x00R\nbranchNode\x12\x36\n\tgate_node\x18\t \x01(\x0b\x32\x17.flyteidl.core.GateNodeH\x00R\x08gateNode\x12\x39\n\narray_node\x18\n \x01(\x0b\x32\x18.flyteidl.core.ArrayNodeH\x00R\tarrayNodeB\x08\n\x06target\"\xfc\x02\n\x10WorkflowMetadata\x12M\n\x12quality_of_service\x18\x01 \x01(\x0b\x32\x1f.flyteidl.core.QualityOfServiceR\x10qualityOfService\x12N\n\non_failure\x18\x02 \x01(\x0e\x32/.flyteidl.core.WorkflowMetadata.OnFailurePolicyR\tonFailure\x12=\n\x04tags\x18\x03 \x03(\x0b\x32).flyteidl.core.WorkflowMetadata.TagsEntryR\x04tags\x1a\x37\n\tTagsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"Q\n\x0fOnFailurePolicy\x12\x14\n\x10\x46\x41IL_IMMEDIATELY\x10\x00\x12(\n$FAIL_AFTER_EXECUTABLE_NODES_COMPLETE\x10\x01\"@\n\x18WorkflowMetadataDefaults\x12$\n\rinterruptible\x18\x01 \x01(\x08R\rinterruptible\"\xa2\x03\n\x10WorkflowTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\x08metadata\x18\x02 \x01(\x0b\x32\x1f.flyteidl.core.WorkflowMetadataR\x08metadata\x12;\n\tinterface\x18\x03 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12)\n\x05nodes\x18\x04 \x03(\x0b\x32\x13.flyteidl.core.NodeR\x05nodes\x12\x30\n\x07outputs\x18\x05 \x03(\x0b\x32\x16.flyteidl.core.BindingR\x07outputs\x12\x36\n\x0c\x66\x61ilure_node\x18\x06 \x01(\x0b\x32\x13.flyteidl.core.NodeR\x0b\x66\x61ilureNode\x12T\n\x11metadata_defaults\x18\x07 \x01(\x0b\x32\'.flyteidl.core.WorkflowMetadataDefaultsR\x10metadataDefaults\"\xc5\x01\n\x11TaskNodeOverrides\x12\x36\n\tresources\x18\x01 \x01(\x0b\x32\x18.flyteidl.core.ResourcesR\tresources\x12O\n\x12\x65xtended_resources\x18\x02 \x01(\x0b\x32 .flyteidl.core.ExtendedResourcesR\x11\x65xtendedResources\x12\'\n\x0f\x63ontainer_image\x18\x03 \x01(\tR\x0e\x63ontainerImage\"\xba\x01\n\x12LaunchPlanTemplate\x12)\n\x02id\x18\x01 \x01(\x0b\x32\x19.flyteidl.core.IdentifierR\x02id\x12;\n\tinterface\x18\x02 \x01(\x0b\x32\x1d.flyteidl.core.TypedInterfaceR\tinterface\x12<\n\x0c\x66ixed_inputs\x18\x03 
\x01(\x0b\x32\x19.flyteidl.core.LiteralMapR\x0b\x66ixedInputsB\xb3\x01\n\x11\x63om.flyteidl.coreB\rWorkflowProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -33,44 +34,44 @@ DESCRIPTOR._serialized_options = b'\n\021com.flyteidl.coreB\rWorkflowProtoP\001Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\242\002\003FCX\252\002\rFlyteidl.Core\312\002\rFlyteidl\\Core\342\002\031Flyteidl\\Core\\GPBMetadata\352\002\016Flyteidl::Core' _WORKFLOWMETADATA_TAGSENTRY._options = None _WORKFLOWMETADATA_TAGSENTRY._serialized_options = b'8\001' - _globals['_IFBLOCK']._serialized_start=318 - _globals['_IFBLOCK']._serialized_end=441 - _globals['_IFELSEBLOCK']._serialized_start=444 - _globals['_IFELSEBLOCK']._serialized_end=656 - _globals['_BRANCHNODE']._serialized_start=658 - _globals['_BRANCHNODE']._serialized_end=723 - _globals['_TASKNODE']._serialized_start=726 - _globals['_TASKNODE']._serialized_end=877 - _globals['_WORKFLOWNODE']._serialized_start=880 - _globals['_WORKFLOWNODE']._serialized_end=1046 - _globals['_APPROVECONDITION']._serialized_start=1048 - _globals['_APPROVECONDITION']._serialized_end=1095 - _globals['_SIGNALCONDITION']._serialized_start=1098 - _globals['_SIGNALCONDITION']._serialized_end=1242 - _globals['_SLEEPCONDITION']._serialized_start=1244 - _globals['_SLEEPCONDITION']._serialized_end=1315 - _globals['_GATENODE']._serialized_start=1318 - _globals['_GATENODE']._serialized_end=1515 - _globals['_ARRAYNODE']._serialized_start=1518 - _globals['_ARRAYNODE']._serialized_end=1709 - _globals['_NODEMETADATA']._serialized_start=1712 - _globals['_NODEMETADATA']._serialized_end=2108 - _globals['_ALIAS']._serialized_start=2110 - _globals['_ALIAS']._serialized_end=2157 - _globals['_NODE']._serialized_start=2160 - _globals['_NODE']._serialized_end=2703 - _globals['_WORKFLOWMETADATA']._serialized_start=2706 - _globals['_WORKFLOWMETADATA']._serialized_end=3086 - _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_start=2948 - _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_end=3003 - _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_start=3005 - _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_end=3086 - _globals['_WORKFLOWMETADATADEFAULTS']._serialized_start=3088 - _globals['_WORKFLOWMETADATADEFAULTS']._serialized_end=3152 - _globals['_WORKFLOWTEMPLATE']._serialized_start=3155 - _globals['_WORKFLOWTEMPLATE']._serialized_end=3573 - _globals['_TASKNODEOVERRIDES']._serialized_start=3576 - _globals['_TASKNODEOVERRIDES']._serialized_end=3773 - _globals['_LAUNCHPLANTEMPLATE']._serialized_start=3776 - _globals['_LAUNCHPLANTEMPLATE']._serialized_end=3962 + _globals['_IFBLOCK']._serialized_start=350 + _globals['_IFBLOCK']._serialized_end=473 + _globals['_IFELSEBLOCK']._serialized_start=476 + _globals['_IFELSEBLOCK']._serialized_end=688 + _globals['_BRANCHNODE']._serialized_start=690 + _globals['_BRANCHNODE']._serialized_end=755 + _globals['_TASKNODE']._serialized_start=758 + _globals['_TASKNODE']._serialized_end=909 + _globals['_WORKFLOWNODE']._serialized_start=912 + _globals['_WORKFLOWNODE']._serialized_end=1078 + _globals['_APPROVECONDITION']._serialized_start=1080 + _globals['_APPROVECONDITION']._serialized_end=1127 + _globals['_SIGNALCONDITION']._serialized_start=1130 + 
_globals['_SIGNALCONDITION']._serialized_end=1274 + _globals['_SLEEPCONDITION']._serialized_start=1276 + _globals['_SLEEPCONDITION']._serialized_end=1347 + _globals['_GATENODE']._serialized_start=1350 + _globals['_GATENODE']._serialized_end=1547 + _globals['_ARRAYNODE']._serialized_start=1550 + _globals['_ARRAYNODE']._serialized_end=1765 + _globals['_NODEMETADATA']._serialized_start=1768 + _globals['_NODEMETADATA']._serialized_end=2164 + _globals['_ALIAS']._serialized_start=2166 + _globals['_ALIAS']._serialized_end=2213 + _globals['_NODE']._serialized_start=2216 + _globals['_NODE']._serialized_end=2759 + _globals['_WORKFLOWMETADATA']._serialized_start=2762 + _globals['_WORKFLOWMETADATA']._serialized_end=3142 + _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_start=3004 + _globals['_WORKFLOWMETADATA_TAGSENTRY']._serialized_end=3059 + _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_start=3061 + _globals['_WORKFLOWMETADATA_ONFAILUREPOLICY']._serialized_end=3142 + _globals['_WORKFLOWMETADATADEFAULTS']._serialized_start=3144 + _globals['_WORKFLOWMETADATADEFAULTS']._serialized_end=3208 + _globals['_WORKFLOWTEMPLATE']._serialized_start=3211 + _globals['_WORKFLOWTEMPLATE']._serialized_end=3629 + _globals['_TASKNODEOVERRIDES']._serialized_start=3632 + _globals['_TASKNODEOVERRIDES']._serialized_end=3829 + _globals['_LAUNCHPLANTEMPLATE']._serialized_start=3832 + _globals['_LAUNCHPLANTEMPLATE']._serialized_end=4018 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi index 11efca2392..5c6f7f69d1 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/core/workflow_pb2.pyi @@ -7,6 +7,7 @@ from flyteidl.core import tasks_pb2 as _tasks_pb2 from flyteidl.core import types_pb2 as _types_pb2 from flyteidl.core import security_pb2 as _security_pb2 from google.protobuf import duration_pb2 as _duration_pb2 +from google.protobuf import wrappers_pb2 as _wrappers_pb2 from google.protobuf.internal import containers as _containers from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor diff --git a/flyteidl/gen/pb_rust/flyteidl.core.rs b/flyteidl/gen/pb_rust/flyteidl.core.rs index 6922855798..5612d9c7a2 100644 --- a/flyteidl/gen/pb_rust/flyteidl.core.rs +++ b/flyteidl/gen/pb_rust/flyteidl.core.rs @@ -2397,18 +2397,24 @@ pub struct ArrayNode { /// node is the sub-node that will be executed for each element in the array. #[prost(message, optional, boxed, tag="1")] pub node: ::core::option::Option<::prost::alloc::boxed::Box>, - /// parallelism defines the minimum number of instances to bring up concurrently at any given - /// point. Note that this is an optimistic restriction and that, due to network partitioning or - /// other failures, the actual number of currently running instances might be more. This has to - /// be a positive number if assigned. Default value is size. - #[prost(int64, tag="2")] - pub parallelism: i64, + #[prost(oneof="array_node::ParallelismOption", tags="2")] + pub parallelism_option: ::core::option::Option, #[prost(oneof="array_node::SuccessCriteria", tags="3, 4")] pub success_criteria: ::core::option::Option, } /// Nested message and enum types in `ArrayNode`. 
pub mod array_node { #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum ParallelismOption { + /// parallelism defines the minimum number of instances to bring up concurrently at any given + /// point. Note that this is an optimistic restriction and that, due to network partitioning or + /// other failures, the actual number of currently running instances might be more. This has to + /// be a positive number if assigned. Default value is size. + #[prost(uint32, tag="2")] + Parallelism(u32), + } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum SuccessCriteria { /// min_successes is an absolute number of the minimum number of successful completions of diff --git a/flyteidl/protos/flyteidl/core/workflow.proto b/flyteidl/protos/flyteidl/core/workflow.proto index a305c8fad7..ec38904cbf 100644 --- a/flyteidl/protos/flyteidl/core/workflow.proto +++ b/flyteidl/protos/flyteidl/core/workflow.proto @@ -13,6 +13,7 @@ import "flyteidl/core/tasks.proto"; import "flyteidl/core/types.proto"; import "flyteidl/core/security.proto"; import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; // Defines a condition and the execution unit that should be executed if the condition is satisfied. message IfBlock { @@ -114,11 +115,13 @@ message ArrayNode { // node is the sub-node that will be executed for each element in the array. Node node = 1; - // parallelism defines the minimum number of instances to bring up concurrently at any given - // point. Note that this is an optimistic restriction and that, due to network partitioning or - // other failures, the actual number of currently running instances might be more. This has to - // be a positive number if assigned. Default value is size. - int64 parallelism = 2; + oneof parallelism_option { + // parallelism defines the minimum number of instances to bring up concurrently at any given + // point. Note that this is an optimistic restriction and that, due to network partitioning or + // other failures, the actual number of currently running instances might be more. This has to + // be a positive number if assigned. Default value is size. 
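+        // Wrapping parallelism in a oneof makes "unset" observable on the wire: when
+        // parallelism_option is not set, flytepropeller falls back to the workflow-level
+        // parallelism instead of interpreting a zero default as a limit.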
+ uint32 parallelism = 2; + } oneof success_criteria { // min_successes is an absolute number of the minimum number of successful completions of diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array.go index 9916f6a075..d8ba50546c 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array.go @@ -2,7 +2,7 @@ package v1alpha1 type ArrayNodeSpec struct { SubNodeSpec *NodeSpec - Parallelism int64 + Parallelism *uint32 MinSuccesses *uint32 MinSuccessRatio *float32 } @@ -11,7 +11,7 @@ func (a *ArrayNodeSpec) GetSubNodeSpec() *NodeSpec { return a.SubNodeSpec } -func (a *ArrayNodeSpec) GetParallelism() int64 { +func (a *ArrayNodeSpec) GetParallelism() *uint32 { return a.Parallelism } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array_test.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array_test.go index c17051b6bd..1cd249acca 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array_test.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/array_test.go @@ -16,12 +16,12 @@ func TestArrayNodeSpec_GetSubNodeSpec(t *testing.T) { } func TestArrayNodeSpec_GetParallelism(t *testing.T) { - parallelism := int64(5) + parallelism := uint32(5) arrayNodeSpec := ArrayNodeSpec{ - Parallelism: parallelism, + Parallelism: ¶llelism, } - if arrayNodeSpec.GetParallelism() != parallelism { + if arrayNodeSpec.GetParallelism() != ¶llelism { t.Errorf("Expected %d, but got %d", parallelism, arrayNodeSpec.GetParallelism()) } } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go index d6f07b856f..f92cca4a5a 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/iface.go @@ -258,7 +258,7 @@ type ExecutableGateNode interface { type ExecutableArrayNode interface { GetSubNodeSpec() *NodeSpec - GetParallelism() int64 + GetParallelism() *uint32 GetMinSuccesses() *uint32 GetMinSuccessRatio() *float32 } diff --git a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNode.go b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNode.go index 742ceb2dbb..8634d1175c 100644 --- a/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNode.go +++ b/flytepropeller/pkg/apis/flyteworkflow/v1alpha1/mocks/ExecutableArrayNode.go @@ -84,7 +84,7 @@ type ExecutableArrayNode_GetParallelism struct { *mock.Call } -func (_m ExecutableArrayNode_GetParallelism) Return(_a0 int64) *ExecutableArrayNode_GetParallelism { +func (_m ExecutableArrayNode_GetParallelism) Return(_a0 *uint32) *ExecutableArrayNode_GetParallelism { return &ExecutableArrayNode_GetParallelism{Call: _m.Call.Return(_a0)} } @@ -99,14 +99,16 @@ func (_m *ExecutableArrayNode) OnGetParallelismMatch(matchers ...interface{}) *E } // GetParallelism provides a mock function with given fields: -func (_m *ExecutableArrayNode) GetParallelism() int64 { +func (_m *ExecutableArrayNode) GetParallelism() *uint32 { ret := _m.Called() - var r0 int64 - if rf, ok := ret.Get(0).(func() int64); ok { + var r0 *uint32 + if rf, ok := ret.Get(0).(func() *uint32); ok { r0 = rf() } else { - r0 = ret.Get(0).(int64) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*uint32) + } } return r0 diff --git a/flytepropeller/pkg/compiler/transformers/k8s/node.go b/flytepropeller/pkg/compiler/transformers/k8s/node.go index 7b5df9b3b5..8a4c9248ec 100644 --- 
a/flytepropeller/pkg/compiler/transformers/k8s/node.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/node.go @@ -178,11 +178,17 @@ func buildNodeSpec(n *core.Node, tasks []*core.CompiledTask, errs errors.Compile return nil, ok } + var parallelism *uint32 + switch x := arrayNode.GetParallelismOption().(type) { + case *core.ArrayNode_Parallelism: + parallelism = &x.Parallelism + } + // build ArrayNode nodeSpec.Kind = v1alpha1.NodeKindArray nodeSpec.ArrayNode = &v1alpha1.ArrayNodeSpec{ SubNodeSpec: subNodeSpecs[0], - Parallelism: arrayNode.Parallelism, + Parallelism: parallelism, } switch successCriteria := arrayNode.SuccessCriteria.(type) { diff --git a/flytepropeller/pkg/compiler/transformers/k8s/node_test.go b/flytepropeller/pkg/compiler/transformers/k8s/node_test.go index ff1f263b6e..c6a08b5991 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/node_test.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/node_test.go @@ -296,7 +296,34 @@ func TestBuildNodeSpec(t *testing.T) { }, }, }, - Parallelism: 10, + ParallelismOption: &core.ArrayNode_Parallelism{ + Parallelism: 10, + }, + SuccessCriteria: &core.ArrayNode_MinSuccessRatio{ + MinSuccessRatio: 0.5, + }, + }, + } + + mustBuild(t, n, 1, errs.NewScope()) + specs, ok := buildNodeSpec(n.GetCoreNode(), tasks, errs) + assert.True(t, ok) + assert.Len(t, specs, 1) + assert.Equal(t, *specs[0].ArrayNode.Parallelism, uint32(10)) + + n.Node.Target = &core.Node_ArrayNode{ + ArrayNode: &core.ArrayNode{ + Node: &core.Node{ + Id: "foo", + Target: &core.Node_TaskNode{ + TaskNode: &core.TaskNode{ + Reference: &core.TaskNode_ReferenceId{ + ReferenceId: &core.Identifier{Name: "ref_1"}, + }, + }, + }, + }, + ParallelismOption: nil, SuccessCriteria: &core.ArrayNode_MinSuccessRatio{ MinSuccessRatio: 0.5, }, @@ -304,6 +331,10 @@ func TestBuildNodeSpec(t *testing.T) { } mustBuild(t, n, 1, errs.NewScope()) + specs, ok = buildNodeSpec(n.GetCoreNode(), tasks, errs) + assert.True(t, ok) + assert.Len(t, specs, 1) + assert.Nil(t, specs[0].ArrayNode.Parallelism) }) } diff --git a/flytepropeller/pkg/controller/nodes/array/handler.go b/flytepropeller/pkg/controller/nodes/array/handler.go index 1084326a33..1699d47de0 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler.go +++ b/flytepropeller/pkg/controller/nodes/array/handler.go @@ -255,7 +255,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu availableParallelism := 0 // using the workflow's parallelism if the array node parallelism is not set - useWorkflowParallelism := int(arrayNode.GetParallelism()) == -1 + useWorkflowParallelism := arrayNode.GetParallelism() == nil if useWorkflowParallelism { // greedily take all available slots // TODO: This will need to be re-evaluated if we want to support dynamics & sub_workflows @@ -263,7 +263,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu maxParallelism := nCtx.ExecutionContext().GetExecutionConfig().MaxParallelism availableParallelism = int(maxParallelism - currentParallelism) } else { - availableParallelism = int(arrayNode.GetParallelism()) + availableParallelism = int(*arrayNode.GetParallelism()) if availableParallelism == 0 { availableParallelism = len(arrayNodeState.SubNodePhases.GetItems()) } diff --git a/flytepropeller/pkg/controller/nodes/array/handler_test.go b/flytepropeller/pkg/controller/nodes/array/handler_test.go index f514790542..b1f055c483 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler_test.go +++ 
b/flytepropeller/pkg/controller/nodes/array/handler_test.go @@ -456,6 +456,10 @@ func TestHandleArrayNodePhaseNone(t *testing.T) { } } +func uint32Ptr(v uint32) *uint32 { + return &v +} + func TestHandleArrayNodePhaseExecuting(t *testing.T) { ctx := context.Background() minSuccessRatio := float32(0.5) @@ -478,7 +482,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { tests := []struct { name string - parallelism int + parallelism *uint32 minSuccessRatio *float32 subNodePhases []v1alpha1.NodePhase subNodeTaskPhases []core.Phase @@ -491,7 +495,8 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { incrementParallelismCount uint32 }{ { - name: "StartAllSubNodes", + name: "StartAllSubNodes", + parallelism: uint32Ptr(0), subNodePhases: []v1alpha1.NodePhase{ v1alpha1.NodePhaseQueued, v1alpha1.NodePhaseQueued, @@ -510,7 +515,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, { name: "StartOneSubNodeParallelism", - parallelism: 1, + parallelism: uint32Ptr(1), subNodePhases: []v1alpha1.NodePhase{ v1alpha1.NodePhaseQueued, v1alpha1.NodePhaseQueued, @@ -528,7 +533,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, { name: "UtilizeWfParallelismAllSubNodes", - parallelism: -1, + parallelism: nil, currentWfParallelism: 0, incrementParallelismCount: 2, subNodePhases: []v1alpha1.NodePhase{ @@ -549,7 +554,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, { name: "UtilizeWfParallelismSomeSubNodes", - parallelism: -1, + parallelism: nil, currentWfParallelism: workflowMaxParallelism - 1, incrementParallelismCount: 1, subNodePhases: []v1alpha1.NodePhase{ @@ -569,7 +574,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, { name: "UtilizeWfParallelismNoSubNodes", - parallelism: -1, + parallelism: nil, currentWfParallelism: workflowMaxParallelism, incrementParallelismCount: 0, subNodePhases: []v1alpha1.NodePhase{ @@ -586,7 +591,8 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{}, }, { - name: "StartSubNodesNewAttempts", + name: "StartSubNodesNewAttempts", + parallelism: uint32Ptr(0), subNodePhases: []v1alpha1.NodePhase{ v1alpha1.NodePhaseQueued, v1alpha1.NodePhaseQueued, @@ -604,7 +610,8 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_RUNNING, idlcore.TaskExecution_RUNNING}, }, { - name: "AllSubNodesSuccedeed", + name: "AllSubNodesSuccedeed", + parallelism: uint32Ptr(0), subNodePhases: []v1alpha1.NodePhase{ v1alpha1.NodePhaseRunning, v1alpha1.NodePhaseRunning, @@ -623,6 +630,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { }, { name: "OneSubNodeSuccedeedMinSuccessRatio", + parallelism: uint32Ptr(0), minSuccessRatio: &minSuccessRatio, subNodePhases: []v1alpha1.NodePhase{ v1alpha1.NodePhaseRunning, @@ -641,7 +649,8 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { expectedExternalResourcePhases: []idlcore.TaskExecution_Phase{idlcore.TaskExecution_SUCCEEDED, idlcore.TaskExecution_FAILED}, }, { - name: "OneSubNodeFailed", + name: "OneSubNodeFailed", + parallelism: uint32Ptr(0), subNodePhases: []v1alpha1.NodePhase{ v1alpha1.NodePhaseRunning, v1alpha1.NodePhaseRunning, @@ -697,7 +706,7 @@ func TestHandleArrayNodePhaseExecuting(t *testing.T) { eventRecorder := newBufferedEventRecorder() nodeSpec := arrayNodeSpec - nodeSpec.ArrayNode.Parallelism = int64(test.parallelism) + nodeSpec.ArrayNode.Parallelism = test.parallelism nodeSpec.ArrayNode.MinSuccessRatio = test.minSuccessRatio 
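+			// The parallelism values in the test table mirror the new pointer semantics:
+			// nil defers to the workflow-level max parallelism, a pointer to 0 allows every
+			// sub-node to start, and a pointer to N caps concurrently running sub-nodes at N.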
nCtx := createNodeExecutionContext(dataStore, eventRecorder, nil, literalMap, &arrayNodeSpec, arrayNodeState, test.currentWfParallelism, workflowMaxParallelism) From 99552560d535dbef9e34773470685a7dac403352 Mon Sep 17 00:00:00 2001 From: Yini <131146298+yini7777@users.noreply.github.com> Date: Thu, 11 Apr 2024 23:22:36 +0200 Subject: [PATCH 12/15] Fix mounting secrets (#5063) Signed-off-by: yini.gao@schibsted.com --- charts/flyte-core/README.md | 2 +- charts/flyte-core/templates/common/secret-auth.yaml | 2 +- ...ues-keycloak-idp-flyteclients-without-browser.yaml | 7 +++++-- charts/flyte-core/values.yaml | 7 +++++-- docker/sandbox-bundled/manifests/complete-agent.yaml | 4 ++-- docker/sandbox-bundled/manifests/complete.yaml | 4 ++-- docker/sandbox-bundled/manifests/dev.yaml | 4 ++-- docs/deployment/configuration/auth_setup.rst | 11 ++++++++--- 8 files changed, 26 insertions(+), 15 deletions(-) diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index 00fd69f368..55d327a25f 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -273,7 +273,7 @@ helm install gateway bitnami/contour -n flyte | flytescheduler.tolerations | list | `[]` | tolerations for Flytescheduler deployment | | secrets.adminOauthClientCredentials.clientId | string | `"flytepropeller"` | | | secrets.adminOauthClientCredentials.clientSecret | string | `"foobar"` | | -| secrets.adminOauthClientCredentials.enabled | bool | `true` | If enabled is true, helm will create and manage `flyte-secret-auth` and populate it with `clientSecret`. If enabled is false, it's up to the user to create `flyte-secret-auth` as described in https://docs.flyte.org/en/latest/deployment/cluster_config/auth_setup.html#oauth2-authorization-server | +| secrets.adminOauthClientCredentials.enabled | bool | `true` | | | sparkoperator | object | `{"enabled":false,"plugin_config":{"plugins":{"spark":{"spark-config-default":[{"spark.hadoop.fs.s3a.aws.credentials.provider":"com.amazonaws.auth.DefaultAWSCredentialsProviderChain"},{"spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version":"2"},{"spark.kubernetes.allocation.batch.size":"50"},{"spark.hadoop.fs.s3a.acl.default":"BucketOwnerFullControl"},{"spark.hadoop.fs.s3n.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3n.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3a.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.multipart.threshold":"536870912"},{"spark.blacklist.enabled":"true"},{"spark.blacklist.timeout":"5m"},{"spark.task.maxfailures":"8"}]}}}}` | Optional: Spark Plugin using the Spark Operator | | sparkoperator.enabled | bool | `false` | - enable or disable Sparkoperator deployment installation | | sparkoperator.plugin_config | object | 
`{"plugins":{"spark":{"spark-config-default":[{"spark.hadoop.fs.s3a.aws.credentials.provider":"com.amazonaws.auth.DefaultAWSCredentialsProviderChain"},{"spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version":"2"},{"spark.kubernetes.allocation.batch.size":"50"},{"spark.hadoop.fs.s3a.acl.default":"BucketOwnerFullControl"},{"spark.hadoop.fs.s3n.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3n.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3a.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.multipart.threshold":"536870912"},{"spark.blacklist.enabled":"true"},{"spark.blacklist.timeout":"5m"},{"spark.task.maxfailures":"8"}]}}}` | Spark plugin configuration | diff --git a/charts/flyte-core/templates/common/secret-auth.yaml b/charts/flyte-core/templates/common/secret-auth.yaml index 50290357d8..d13247bd9a 100644 --- a/charts/flyte-core/templates/common/secret-auth.yaml +++ b/charts/flyte-core/templates/common/secret-auth.yaml @@ -1,4 +1,4 @@ -{{- if .Values.secrets.adminOauthClientCredentials.enabled }} +{{- if and (.Values.secrets.adminOauthClientCredentials.enabled) (not (empty .Values.secrets.adminOauthClientCredentials.clientSecret)) }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml b/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml index 5b12b7b3f9..22624d8775 100644 --- a/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml +++ b/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml @@ -298,9 +298,12 @@ deployRedoc: false secrets: adminOauthClientCredentials: - # -- If enabled is true, helm will create and manage `flyte-secret-auth` and populate it with `clientSecret`. - # If enabled is false, it's up to the user to create `flyte-secret-auth` as described in + # If enabled is true, and `clientSecret` is specified, helm will create and mount `flyte-secret-auth`. + # If enabled is true, and `clientSecret` is null, it's up to the user to create `flyte-secret-auth` as described in # https://docs.flyte.org/en/latest/deployment/cluster_config/auth_setup.html#oauth2-authorization-server + # and helm will mount `flyte-secret-auth`. + # If enabled is false, auth is not turned on. + # Note: Unsupported combination: enabled.false and clientSecret.someValue enabled: true clientSecret: "<>" # put the secret for the confidential client flytepropeller defined in the IDP clientId: "flytepropeller" #use this client id and secret in the flytectl config with ClientSecret option diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 8b1e4b89d6..f3580786a6 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -430,9 +430,12 @@ deployRedoc: false secrets: adminOauthClientCredentials: - # -- If enabled is true, helm will create and manage `flyte-secret-auth` and populate it with `clientSecret`. - # If enabled is false, it's up to the user to create `flyte-secret-auth` as described in + # If enabled is true, and `clientSecret` is specified, helm will create and mount `flyte-secret-auth`. 
+ # If enabled is true, and `clientSecret` is null, it's up to the user to create `flyte-secret-auth` as described in # https://docs.flyte.org/en/latest/deployment/cluster_config/auth_setup.html#oauth2-authorization-server + # and helm will mount `flyte-secret-auth`. + # If enabled is false, auth is not turned on. + # Note: Unsupported combination: enabled.false and clientSecret.someValue enabled: true clientSecret: foobar clientId: flytepropeller diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 2db8226933..4b31375fc3 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -816,7 +816,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: WEJxZGNCTWJiNFUwZEd1bQ== + haSharedSecret: WlVScnNIb3I2RFM4UFhrcA== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1412,7 +1412,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 45571013e2bcbc17744162363ccc7fc5c99072b2553a3fe29f41660a07e2e864 + checksum/secret: a041f8b1e9c41f465e4f113957cc10f1b48b2e259a5d193657571ae597305e2c labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 98b5aa3657..4f5f878a00 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -796,7 +796,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: a1Zqcm1HUlcxZUN4SExqSw== + haSharedSecret: VU5MNDc1MDZUU05OWmZOYw== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1360,7 +1360,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: bf4abdac7aaf5a74cf2f12f7511d4af46e5c2b8228637c5eb29f9ed5399e4942 + checksum/secret: 0c9fcdc5ba4f5091dbd31e0a907c4748391313df162b5e1d3ace3084b62cdd40 labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index 49c259b0d9..43144186ce 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: dkdTTTV4ZkNpc2pneXZBRQ== + haSharedSecret: RXhwTzhZT25HZzJjdUllSQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 40299f1a8b9fffd1ef10051c289e1d654de7f755fff5f44cde65a9cf96bcd543 + checksum/secret: 6f8a6d8c2b4e54840abf28822833192923adeb062f926c962e8e0785b96877d5 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/deployment/configuration/auth_setup.rst b/docs/deployment/configuration/auth_setup.rst index d9a7f2c7eb..bb73bef8e3 100644 --- a/docs/deployment/configuration/auth_setup.rst +++ b/docs/deployment/configuration/auth_setup.rst @@ -346,8 +346,12 @@ Apply OIDC Configuration secrets: adminOauthClientCredentials: - # -- If enabled is true, helm will create and manage `flyte-secret-auth` and populate it with `clientSecret`. - # If enabled is false, it's up to the user to create `flyte-secret-auth` + # If enabled is true, and `clientSecret` is specified, helm will create and mount `flyte-secret-auth`. 
+ # If enabled is true, and `clientSecret` is null, it's up to the user to create `flyte-secret-auth` as described in + # https://docs.flyte.org/en/latest/deployment/cluster_config/auth_setup.html#oauth2-authorization-server + # and helm will mount `flyte-secret-auth`. + # If enabled is false, auth is not turned on. + # Note: Unsupported combination: enabled.false and clientSecret.someValue enabled: true # Use the non-encoded version of the random password clientSecret: "" @@ -677,7 +681,8 @@ Alternatively, you can instruct Helm not to create and manage the secret for ``f secrets: adminOauthClientCredentials: - enabled: false #set to false + enabled: true # enable mounting the flyte-secret-auth secret to the flytepropeller. + clientSecret: null # disable Helm from creating the flyte-secret-auth secret. # Replace with the client_id provided by provided by your IdP for flytepropeller. clientId: From 734d6f33b7182fc026894e97e508f06ab4ed5ded Mon Sep 17 00:00:00 2001 From: Nikki Everett Date: Thu, 11 Apr 2024 18:16:28 -0500 Subject: [PATCH 13/15] Update "Creating a Flyte project" with link to new Dockerfile project template (#5215) * update with link to dockerfile template Signed-off-by: nikki everett * fix 404 error Signed-off-by: nikki everett --------- Signed-off-by: nikki everett --- .../creating_a_flyte_project.md | 6 +++--- .../flyte_project_components.md | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/getting_started_with_workflow_development/creating_a_flyte_project.md b/docs/getting_started_with_workflow_development/creating_a_flyte_project.md index 7e9017664f..0e0160ec59 100644 --- a/docs/getting_started_with_workflow_development/creating_a_flyte_project.md +++ b/docs/getting_started_with_workflow_development/creating_a_flyte_project.md @@ -23,7 +23,7 @@ conda activate flyte-example Next, initialize your Flyte project. The [flytekit-python-template GitHub repository](https://github.com/flyteorg/flytekit-python-template) contains Flyte project templates with sample code that you can run as is or modify to suit your needs. -In this example, we will initialize the [basic-example-imagespec project template](https://github.com/flyteorg/flytekit-python-template/tree/main/basic-example-imagespec). +In this example, we will initialize the [basic-template-imagespec project template](https://github.com/flyteorg/flytekit-python-template/tree/main/basic-template-imagespec). ```{prompt} bash $ pyflyte init my_project @@ -31,9 +31,9 @@ pyflyte init my_project :::{note} -To initialize a Flyte project with a different template, use the `--template` parameter: +If you need to use a Dockerfile for your project, you can initialize the Dockerfile template: -`pyflyte init --template hello-world hello-world` +`pyflyte init --template basic-template-dockerfile my_project` ::: ### 3. Install additional requirements diff --git a/docs/getting_started_with_workflow_development/flyte_project_components.md b/docs/getting_started_with_workflow_development/flyte_project_components.md index 8c24946aad..47821c5201 100644 --- a/docs/getting_started_with_workflow_development/flyte_project_components.md +++ b/docs/getting_started_with_workflow_development/flyte_project_components.md @@ -1,3 +1,4 @@ +(flyte_project_components)= # Flyte project components A Flyte project is a directory containing task and workflow code, internal Python source code, configuration files, and other artifacts required to package up your code so that it can be run on a Flyte cluster. 
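For readers following the template discussion above, the sketch below illustrates what an imagespec-based project boils down to: the task's container image is declared in code, so no Dockerfile is required. The image name, package list, and registry are placeholder values, not taken from the actual template.

```python
from flytekit import ImageSpec, task

# Placeholder values; substitute your own image name, dependencies, and registry.
image = ImageSpec(
    name="my-project-image",
    packages=["pandas"],
    registry="localhost:30000",
)


@task(container_image=image)
def say_hello(name: str) -> str:
    # The container image for this task is built from the ImageSpec above at
    # registration time, so the project does not need a Dockerfile.
    return f"Hello, {name}!"
```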
@@ -26,13 +27,13 @@ You can specify pip-installable Python dependencies in your project by adding th `requirements.txt` file. ```{note} -We recommend using [pip-compile](https://pip-tools.readthedocs.io/en/latest/) to +We recommend using [pip-compile](https://pip-tools.readthedocs.io/en/stable/) to manage your project's Python requirements. ``` ````{dropdown} See requirements.txt -```{rli} https://raw.githubusercontent.com/flyteorg/flytekit-python-template/main/simple-example/%7B%7Bcookiecutter.project_name%7D%7D/requirements.txt +```{rli} https://raw.githubusercontent.com/flyteorg/flytekit-python-template/main/basic-template-imagespec/%7B%7Bcookiecutter.project_name%7D%7D/requirements.txt :caption: requirements.txt ``` From b36556ea94007395b53d78db3fa28e1b9e39b7f3 Mon Sep 17 00:00:00 2001 From: Nikki Everett Date: Thu, 11 Apr 2024 22:40:21 -0500 Subject: [PATCH 14/15] Re-apply changes to dataclass docs from flytesnacks#1553 (#5211) * copy changes over from flytesnacks#1553 Signed-off-by: nikki everett * fix formatting Signed-off-by: nikki everett * fix 404 error Signed-off-by: nikki everett --------- Signed-off-by: nikki everett --- docs/user_guide/data_types_and_io/dataclass.md | 3 +++ docs/user_guide/data_types_and_io/index.md | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/user_guide/data_types_and_io/dataclass.md b/docs/user_guide/data_types_and_io/dataclass.md index fdb9f1d992..7bdaee0385 100644 --- a/docs/user_guide/data_types_and_io/dataclass.md +++ b/docs/user_guide/data_types_and_io/dataclass.md @@ -33,6 +33,9 @@ to serialize and deserialize dataclasses. :::{important} If you're using Flytekit version below v1.10, you'll need to decorate with `@dataclass_json` using `from dataclass_json import dataclass_json` instead of inheriting from Mashumaro's `DataClassJSONMixin`. + +If you're using Flytekit version >= v1.11.1, you don't need to decorate with `@dataclass_json` or +inherit from Mashumaro's `DataClassJSONMixin`. ::: To begin, import the necessary dependencies. diff --git a/docs/user_guide/data_types_and_io/index.md b/docs/user_guide/data_types_and_io/index.md index f55aec69b4..d03df92804 100644 --- a/docs/user_guide/data_types_and_io/index.md +++ b/docs/user_guide/data_types_and_io/index.md @@ -99,8 +99,7 @@ Here's a breakdown of these mappings: * - ``@dataclass`` - ``Struct`` - Automatic - - The class should be a pure value class that inherits from Mashumaro's DataClassJSONMixin, - and be annotated with the ``@dataclass`` decorator. + - The class should be a pure value class annotated with the ``@dataclass`` decorator. 
* - ``np.ndarray`` - File - Automatic From c7d1463090482b091e011e902f9d9a0060eed826 Mon Sep 17 00:00:00 2001 From: Chi-Sheng Liu Date: Fri, 12 Apr 2024 12:24:12 +0800 Subject: [PATCH 15/15] feat(ray): Remove initContainers (#5178) Signed-off-by: Chi-Sheng Liu Signed-off-by: Kevin Su Co-authored-by: Kevin Su --- flyteplugins/go/tasks/plugins/k8s/ray/ray.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go index 869b9abd9e..ff0cfc6cd3 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go @@ -380,19 +380,6 @@ func buildSubmitterPodTemplate(podSpec *v1.PodSpec, objectMeta *metav1.ObjectMet func buildWorkerPodTemplate(primaryContainer *v1.Container, podSpec *v1.PodSpec, objectMetadata *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext) v1.PodTemplateSpec { // Some configs are copy from https://github.com/ray-project/kuberay/blob/b72e6bdcd9b8c77a9dc6b5da8560910f3a0c3ffd/apiserver/pkg/util/cluster.go#L185 // They should always be the same, so we could hard code here. - initContainers := []v1.Container{ - { - Name: "init-myservice", - Image: "busybox:1.28", - Command: []string{ - "sh", - "-c", - "until nslookup $RAY_IP.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done", - }, - Resources: primaryContainer.Resources, - }, - } - podSpec.InitContainers = append(podSpec.InitContainers, initContainers...) primaryContainer.Name = "ray-worker"